Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next
author David S. Miller <davem@davemloft.net>
Fri, 4 Oct 2013 17:26:38 +0000 (13:26 -0400)
committer David S. Miller <davem@davemloft.net>
Fri, 4 Oct 2013 17:26:38 +0000 (13:26 -0400)
Pablo Neira Ayuso says:

====================
The following patchset contains Netfilter updates for your net-next tree,
mostly ipset improvements and enhancements, they are:

* Don't call ip_nest_end needlessly in the error path, suggested by Pablo
  Neira Ayuso, from Jozsef Kadlecsik.

* Fixed sparse warnings about a shadowed variable and a missing RCU
  annotation, and fixed "may be used uninitialized" warnings, also from Jozsef.

* Renamed simple macro names to avoid namespace issues, reported by David
  Laight, again from Jozsef.

* Use a fixed-size type for the timeout in the extension part, and
  cosmetically order matches and targets separately in xt_set.c, from Jozsef.

* Support packet fragments for IPv4 protos without ports from Anders K.
  Pedersen. For example this allows a hash:ip,port ipset containing the
  entry 192.168.0.1,gre:0 to match all packet fragments for PPTP VPN
  tunnels to/from the host. Without this patch only the first packet
  fragment (with fragment offset 0) was matched.
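
  As an illustrative sketch (the set name and rule below are made up, not
  part of the patch), such a setup could look like:

  ipset n pptp hash:ip,port
  ipset a pptp 192.168.0.1,gre:0
  iptables -A FORWARD -m set --match-set pptp dst,dst -j ACCEPT

  With this change the set also matches the later fragments of the GRE
  flow, not only the first one.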

* Introduced a new operation to get both setname and family, from Jozsef.
  The ip[6]tables set match and SET target need to know the family of the
  set in order to reject adding rules which refer to a set with a
  non-matching family. Currently such rules are silently accepted and then
  ignored instead of generating an error message to the user.
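
  A hedged example of the mismatch this is meant to catch (set and rule
  are hypothetical): an IPv4 (family inet) set referenced from an IPv6
  rule.

  ipset n blocklist4 hash:ip family inet
  ip6tables -A INPUT -m set --match-set blocklist4 src -j DROP

  With the new operation, the second command can be refused with an error
  instead of the rule being silently accepted and then ignored.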

* Reworked extensions support in ipset types from Jozsef. The approach of
  defining structures with all variations is not manageable as the
  number of extensions grows. Therefore a blob for the extensions is
  introduced, somewhat similar to conntrack. Support for extensions
  which need a per-data destroy function is added as well.

* When an element timed out in a list:set type of set, the garbage
  collector skipped checking the next element, so the purging was
  delayed to the next run of the gc; fixed by Jozsef.

* A small Kconfig fix: NETFILTER_NETLINK cannot be selected and
  ipset requires it.

* hash:net,net type from Oliver Smith. The type provides the ability to
  store pairs of subnets in a set.

* Comment support for ipset entries from Oliver Smith. This makes it
  possible to annotate entries in a set with comments, for example:

  ipset n foo hash:net,net comment
  ipset a foo 10.0.0.0/21,192.168.1.0/24 comment "office nets A and B"

* Fix of hash types resizing with comment extension from Jozsef.

* Fix of new extensions for list:set type when an element is added
  into a slot from which another element was pushed away, from Jozsef.

* Introduction of a common function for the listing of the element
  extensions from Jozsef.

* Net namespace support for ipset from Vitaly Lavrov.

* hash:net,port,net type from Oliver Smith, which makes it possible
  to store triples of two subnets and a protocol/port pair in a set.
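
  A minimal usage sketch (the set name, addresses and port are
  illustrative only):

  ipset n bar hash:net,port,net
  ipset a bar 10.0.0.0/24,tcp:80,192.168.0.0/24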

* Get xt_TCPMSS working with net namespace, by Gao feng.

* Use the proper network namespace to allocate skbs, also by Gao feng.

* A couple of cleanups for the conntrack SIP helper, by Holger
  Eitzenberger.

* Extend cttimeout to allow setting default conntrack timeouts via
  nfnetlink, so we can get rid of all our sysctl/proc interfaces in
  the future for timeout tuning, from me.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
809 files changed:
CREDITS
Documentation/block/00-INDEX
Documentation/block/cmdline-partition.txt
Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt
Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
Documentation/devicetree/bindings/mmc/synopsis-dw-mshc.txt [deleted file]
Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
Documentation/devicetree/bindings/pci/designware-pcie.txt
Documentation/devicetree/bindings/serial/qca,ar9330-uart.txt [new file with mode: 0644]
Documentation/devicetree/bindings/tty/serial/qca,ar9330-uart.txt [deleted file]
Documentation/kernel-parameters.txt
Documentation/networking/bonding.txt
Documentation/sound/alsa/HD-Audio-Models.txt
MAINTAINERS
Makefile
arch/Kconfig
arch/alpha/include/uapi/asm/socket.h
arch/arc/include/asm/spinlock.h
arch/arc/include/asm/uaccess.h
arch/arc/kernel/time.c
arch/arc/kernel/unaligned.c
arch/arm/Kconfig
arch/arm/crypto/aes-armv4.S
arch/arm/include/asm/uaccess.h
arch/arm/kernel/entry-common.S
arch/arm/kernel/entry-header.S
arch/arm/kvm/reset.c
arch/arm/mach-imx/clk-imx27.c
arch/arm/mach-imx/clk-imx51-imx53.c
arch/arm/mach-imx/mach-imx6q.c
arch/arm/mach-shmobile/clock-r8a73a4.c
arch/arm/mach-shmobile/clock-sh73a0.c
arch/avr32/include/asm/Kbuild
arch/avr32/include/asm/cputime.h [deleted file]
arch/avr32/include/asm/delay.h [deleted file]
arch/avr32/include/asm/device.h [deleted file]
arch/avr32/include/asm/div64.h [deleted file]
arch/avr32/include/asm/emergency-restart.h [deleted file]
arch/avr32/include/asm/futex.h [deleted file]
arch/avr32/include/asm/irq_regs.h [deleted file]
arch/avr32/include/asm/local.h [deleted file]
arch/avr32/include/asm/local64.h [deleted file]
arch/avr32/include/asm/percpu.h [deleted file]
arch/avr32/include/asm/scatterlist.h [deleted file]
arch/avr32/include/asm/sections.h [deleted file]
arch/avr32/include/asm/topology.h [deleted file]
arch/avr32/include/asm/xor.h [deleted file]
arch/avr32/include/uapi/asm/socket.h
arch/avr32/kernel/process.c
arch/avr32/kernel/time.c
arch/cris/include/uapi/asm/socket.h
arch/frv/include/uapi/asm/socket.h
arch/h8300/include/uapi/asm/socket.h
arch/ia64/include/uapi/asm/socket.h
arch/m32r/include/uapi/asm/socket.h
arch/mips/include/asm/cpu-features.h
arch/mips/include/uapi/asm/socket.h
arch/mips/mm/dma-default.c
arch/mn10300/include/uapi/asm/socket.h
arch/openrisc/include/asm/prom.h
arch/parisc/include/uapi/asm/socket.h
arch/parisc/mm/fault.c
arch/powerpc/boot/Makefile
arch/powerpc/boot/epapr-wrapper.c [new file with mode: 0644]
arch/powerpc/boot/epapr.c
arch/powerpc/boot/of.c
arch/powerpc/boot/wrapper
arch/powerpc/include/asm/irq.h
arch/powerpc/include/asm/processor.h
arch/powerpc/include/uapi/asm/socket.h
arch/powerpc/kernel/asm-offsets.c
arch/powerpc/kernel/irq.c
arch/powerpc/kernel/misc_32.S
arch/powerpc/kernel/misc_64.S
arch/powerpc/kernel/process.c
arch/powerpc/kernel/prom_init.c
arch/powerpc/lib/sstep.c
arch/powerpc/platforms/pseries/smp.c
arch/s390/Kconfig
arch/s390/include/asm/mutex.h
arch/s390/include/asm/processor.h
arch/s390/include/asm/spinlock.h
arch/s390/include/uapi/asm/socket.h
arch/score/Kconfig
arch/score/Makefile
arch/score/include/asm/checksum.h
arch/score/include/asm/io.h
arch/score/include/asm/pgalloc.h
arch/score/kernel/entry.S
arch/score/kernel/process.c
arch/sparc/include/uapi/asm/socket.h
arch/sparc/kernel/ds.c
arch/x86/include/asm/xen/page.h
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/cpu/perf_event_intel_uncore.c
arch/x86/kernel/microcode_amd.c
arch/x86/kernel/reboot.c
arch/x86/kvm/vmx.c
arch/x86/platform/efi/efi.c
arch/x86/xen/p2m.c
arch/x86/xen/spinlock.c
arch/xtensa/include/uapi/asm/socket.h
block/Kconfig
block/Makefile
block/blk-cgroup.c
block/blk-core.c
block/blk-exec.c
block/cfq-iosched.c
block/deadline-iosched.c
block/elevator.c
block/genhd.c
block/partitions/Kconfig
block/partitions/cmdline.c
drivers/acpi/acpi_ipmi.c
drivers/acpi/scan.c
drivers/ata/sata_promise.c
drivers/base/core.c
drivers/bcma/driver_pci.c
drivers/block/cciss.c
drivers/block/cpqarray.c
drivers/bluetooth/ath3k.c
drivers/bluetooth/btusb.c
drivers/char/tpm/xen-tpmfront.c
drivers/clocksource/Kconfig
drivers/clocksource/clksrc-of.c
drivers/clocksource/em_sti.c
drivers/clocksource/exynos_mct.c
drivers/cpufreq/acpi-cpufreq.c
drivers/cpufreq/cpufreq-cpu0.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/exynos5440-cpufreq.c
drivers/cpufreq/imx6q-cpufreq.c
drivers/gpu/drm/drm_context.c
drivers/gpu/drm/drm_fops.c
drivers/gpu/drm/drm_stub.c
drivers/gpu/drm/exynos/Kconfig
drivers/gpu/drm/exynos/exynos_drm_buf.c
drivers/gpu/drm/exynos/exynos_drm_fbdev.c
drivers/gpu/drm/i2c/tda998x_drv.c
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_dvo.c
drivers/gpu/drm/i915/intel_panel.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/i915/intel_tv.c
drivers/gpu/drm/msm/adreno/adreno_gpu.c
drivers/gpu/drm/msm/mdp4/mdp4_kms.c
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/msm/msm_drv.h
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/msm/msm_gem.h
drivers/gpu/drm/msm/msm_gem_submit.c
drivers/gpu/drm/msm/msm_gpu.c
drivers/gpu/drm/radeon/btc_dpm.c
drivers/gpu/drm/radeon/btc_dpm.h
drivers/gpu/drm/radeon/ci_dpm.c
drivers/gpu/drm/radeon/cik.c
drivers/gpu/drm/radeon/ni_dpm.c
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/r600_dpm.c
drivers/gpu/drm/radeon/r600_hdmi.c
drivers/gpu/drm/radeon/radeon_asic.c
drivers/gpu/drm/radeon/radeon_atombios.c
drivers/gpu/drm/radeon/radeon_cs.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_pm.c
drivers/gpu/drm/radeon/radeon_ring.c
drivers/gpu/drm/radeon/radeon_uvd.c
drivers/gpu/drm/radeon/si_dpm.c
drivers/gpu/drm/radeon/uvd_v1_0.c
drivers/hv/connection.c
drivers/hv/hv_kvp.c
drivers/hv/hv_snapshot.c
drivers/hv/hv_util.c
drivers/hwmon/applesmc.c
drivers/i2c/busses/i2c-designware-core.c
drivers/i2c/busses/i2c-ismt.c
drivers/i2c/busses/i2c-mv64xxx.c
drivers/i2c/busses/i2c-s3c2410.c
drivers/iio/accel/bma180.c
drivers/iio/adc/at91_adc.c
drivers/iio/buffer_cb.c
drivers/iio/dac/mcp4725.c
drivers/iio/iio_core.h
drivers/iio/industrialio-buffer.c
drivers/iio/industrialio-core.c
drivers/iio/industrialio-event.c
drivers/iio/temperature/tmp006.c
drivers/infiniband/core/cma.c
drivers/isdn/hardware/eicon/um_idi.c
drivers/md/bcache/bcache.h
drivers/md/bcache/bset.c
drivers/md/bcache/btree.c
drivers/md/bcache/journal.c
drivers/md/bcache/request.c
drivers/md/bcache/sysfs.c
drivers/md/bcache/util.c
drivers/md/bcache/util.h
drivers/md/bcache/writeback.c
drivers/md/dm-io.c
drivers/md/dm-mpath.c
drivers/md/dm-snap-persistent.c
drivers/md/dm-snap.c
drivers/md/dm-stats.c
drivers/md/dm-thin.c
drivers/md/dm.c
drivers/md/dm.h
drivers/misc/mei/amthif.c
drivers/misc/mei/bus.c
drivers/misc/mei/client.h
drivers/misc/mei/hbm.c
drivers/misc/mei/init.c
drivers/misc/mei/main.c
drivers/misc/mei/mei_dev.h
drivers/net/bonding/bond_3ad.c
drivers/net/bonding/bond_alb.c
drivers/net/bonding/bond_alb.h
drivers/net/bonding/bond_main.c
drivers/net/bonding/bond_procfs.c
drivers/net/bonding/bond_sysfs.c
drivers/net/bonding/bonding.h
drivers/net/can/at91_can.c
drivers/net/can/bfin_can.c
drivers/net/can/c_can/c_can_pci.c
drivers/net/can/c_can/c_can_platform.c
drivers/net/can/cc770/cc770_platform.c
drivers/net/can/flexcan.c
drivers/net/can/janz-ican3.c
drivers/net/can/mcp251x.c
drivers/net/can/pch_can.c
drivers/net/can/sja1000/ems_pci.c
drivers/net/can/sja1000/kvaser_pci.c
drivers/net/can/sja1000/peak_pci.c
drivers/net/can/sja1000/plx_pci.c
drivers/net/can/sja1000/sja1000_platform.c
drivers/net/can/slcan.c
drivers/net/can/softing/softing_main.c
drivers/net/can/ti_hecc.c
drivers/net/can/usb/peak_usb/pcan_usb_core.c
drivers/net/ethernet/3com/Kconfig
drivers/net/ethernet/8390/ax88796.c
drivers/net/ethernet/amd/atarilance.c
drivers/net/ethernet/amd/au1000_eth.c
drivers/net/ethernet/amd/lance.c
drivers/net/ethernet/amd/pcnet32.c
drivers/net/ethernet/apple/bmac.c
drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
drivers/net/ethernet/atheros/atl1e/atl1e_main.c
drivers/net/ethernet/broadcom/b44.c
drivers/net/ethernet/broadcom/bgmac.c
drivers/net/ethernet/broadcom/bnx2.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
drivers/net/ethernet/broadcom/cnic.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/chelsio/cxgb/pm3393.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/davicom/dm9000.c
drivers/net/ethernet/emulex/benet/be.h
drivers/net/ethernet/emulex/benet/be_cmds.c
drivers/net/ethernet/emulex/benet/be_cmds.h
drivers/net/ethernet/emulex/benet/be_ethtool.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
drivers/net/ethernet/freescale/gianfar_ptp.c
drivers/net/ethernet/freescale/ucc_geth.c
drivers/net/ethernet/hp/hp100.c
drivers/net/ethernet/i825xx/82596.c
drivers/net/ethernet/i825xx/lib82596.c
drivers/net/ethernet/ibm/emac/core.c
drivers/net/ethernet/ibm/emac/debug.h
drivers/net/ethernet/ibm/emac/rgmii.h
drivers/net/ethernet/ibm/emac/tah.h
drivers/net/ethernet/ibm/emac/zmii.h
drivers/net/ethernet/ibm/ibmveth.c
drivers/net/ethernet/intel/e1000/e1000.h
drivers/net/ethernet/intel/e1000e/e1000.h
drivers/net/ethernet/intel/i40e/i40e_adminq.c
drivers/net/ethernet/intel/i40e/i40e_common.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/igb/e1000_82575.h
drivers/net/ethernet/intel/igb/e1000_hw.h
drivers/net/ethernet/intel/igb/e1000_i210.h
drivers/net/ethernet/intel/igb/e1000_mac.h
drivers/net/ethernet/intel/igb/igb.h
drivers/net/ethernet/intel/igb/igb_ethtool.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/igbvf/igbvf.h
drivers/net/ethernet/intel/igbvf/vf.c
drivers/net/ethernet/intel/ixgb/ixgb.h
drivers/net/ethernet/intel/ixgb/ixgb_hw.h
drivers/net/ethernet/intel/ixgbe/ixgbe.h
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
drivers/net/ethernet/intel/ixgbevf/ethtool.c
drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
drivers/net/ethernet/intel/ixgbevf/vf.c
drivers/net/ethernet/jme.c
drivers/net/ethernet/korina.c
drivers/net/ethernet/marvell/mv643xx_eth.c
drivers/net/ethernet/marvell/skge.c
drivers/net/ethernet/micrel/ks8851_mll.c
drivers/net/ethernet/moxa/moxart_ether.c
drivers/net/ethernet/myricom/myri10ge/myri10ge.c
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
drivers/net/ethernet/qlogic/netxen/netxen_nic.h
drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h
drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
drivers/net/ethernet/qlogic/qlge/qlge.h
drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
drivers/net/ethernet/qlogic/qlge/qlge_main.c
drivers/net/ethernet/qlogic/qlge/qlge_mpi.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/sfc/efx.h
drivers/net/ethernet/sfc/mcdi.c
drivers/net/ethernet/sfc/mcdi.h
drivers/net/ethernet/sfc/mdio_10g.h
drivers/net/ethernet/sfc/nic.h
drivers/net/ethernet/sfc/phy.h
drivers/net/ethernet/sfc/selftest.h
drivers/net/ethernet/sgi/meth.c
drivers/net/ethernet/smsc/smsc911x.c
drivers/net/ethernet/stmicro/stmmac/common.h
drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
drivers/net/ethernet/stmicro/stmmac/mmc.h
drivers/net/ethernet/stmicro/stmmac/stmmac.h
drivers/net/ethernet/sun/cassini.c
drivers/net/ethernet/sun/sungem.c
drivers/net/ethernet/sun/sunhme.c
drivers/net/ethernet/sun/sunqe.c
drivers/net/ethernet/ti/cpsw-phy-sel.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/ti/cpts.h
drivers/net/ethernet/ti/davinci_emac.c
drivers/net/ethernet/tile/tilegx.c
drivers/net/ethernet/toshiba/ps3_gelic_net.h
drivers/net/ethernet/toshiba/ps3_gelic_wireless.h
drivers/net/ethernet/toshiba/spider_net.h
drivers/net/ethernet/via/via-rhine.c
drivers/net/ethernet/xilinx/ll_temac_main.c
drivers/net/ethernet/xilinx/xilinx_emaclite.c
drivers/net/fddi/skfp/fplustm.c
drivers/net/fddi/skfp/h/smc.h
drivers/net/fddi/skfp/skfddi.c
drivers/net/hamradio/baycom_ser_fdx.c
drivers/net/hamradio/baycom_ser_hdx.c
drivers/net/irda/sir-dev.h
drivers/net/phy/at803x.c
drivers/net/phy/marvell.c
drivers/net/plip/plip.c
drivers/net/slip/slip.c
drivers/net/usb/catc.c
drivers/net/usb/dm9601.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/usbnet.c
drivers/net/vmxnet3/vmxnet3_int.h
drivers/net/vxlan.c
drivers/net/wan/x25_asy.h
drivers/net/wan/z85230.h
drivers/net/wimax/i2400m/i2400m-usb.h
drivers/net/wimax/i2400m/i2400m.h
drivers/net/wireless/ath/ath10k/debug.h
drivers/net/wireless/ath/ath10k/wmi.c
drivers/net/wireless/ath/ath6kl/common.h
drivers/net/wireless/ath/ath6kl/debug.h
drivers/net/wireless/ath/ath9k/ath9k.h
drivers/net/wireless/ath/ath9k/recv.c
drivers/net/wireless/ath/ath9k/xmit.c
drivers/net/wireless/ath/wil6210/cfg80211.c
drivers/net/wireless/atmel.c
drivers/net/wireless/b43/xmit.c
drivers/net/wireless/b43legacy/xmit.c
drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
drivers/net/wireless/brcm80211/brcmfmac/dhd.h
drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h
drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h
drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
drivers/net/wireless/brcm80211/brcmfmac/usb.c
drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
drivers/net/wireless/brcm80211/brcmsmac/ampdu.h
drivers/net/wireless/brcm80211/brcmsmac/antsel.h
drivers/net/wireless/brcm80211/brcmsmac/channel.h
drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
drivers/net/wireless/brcm80211/brcmsmac/main.c
drivers/net/wireless/brcm80211/brcmsmac/main.h
drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h
drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
drivers/net/wireless/brcm80211/brcmsmac/phy_shim.h
drivers/net/wireless/brcm80211/brcmsmac/pmu.h
drivers/net/wireless/brcm80211/brcmsmac/pub.h
drivers/net/wireless/brcm80211/brcmsmac/rate.h
drivers/net/wireless/brcm80211/brcmsmac/stf.h
drivers/net/wireless/brcm80211/brcmsmac/ucode_loader.h
drivers/net/wireless/brcm80211/include/brcmu_d11.h
drivers/net/wireless/brcm80211/include/brcmu_utils.h
drivers/net/wireless/cw1200/cw1200_spi.c
drivers/net/wireless/cw1200/fwio.c
drivers/net/wireless/cw1200/hwbus.h
drivers/net/wireless/cw1200/hwio.c
drivers/net/wireless/hostap/hostap_info.c
drivers/net/wireless/ipw2x00/ipw2200.c
drivers/net/wireless/ipw2x00/libipw.h
drivers/net/wireless/iwlegacy/3945.h
drivers/net/wireless/iwlegacy/4965.h
drivers/net/wireless/iwlegacy/common.h
drivers/net/wireless/iwlwifi/dvm/agn.h
drivers/net/wireless/iwlwifi/dvm/dev.h
drivers/net/wireless/iwlwifi/dvm/rs.h
drivers/net/wireless/iwlwifi/mvm/rs.h
drivers/net/wireless/mwifiex/11n_aggr.c
drivers/net/wireless/mwifiex/11n_aggr.h
drivers/net/wireless/mwifiex/cmdevt.c
drivers/net/wireless/mwifiex/usb.c
drivers/net/wireless/mwifiex/wmm.c
drivers/net/wireless/mwifiex/wmm.h
drivers/net/wireless/orinoco/orinoco.h
drivers/net/wireless/p54/p54usb.c
drivers/net/wireless/prism54/isl_ioctl.c
drivers/net/wireless/prism54/islpci_dev.c
drivers/net/wireless/prism54/oid_mgt.c
drivers/net/wireless/rtlwifi/cam.h
drivers/net/wireless/rtlwifi/core.c
drivers/net/wireless/rtlwifi/efuse.h
drivers/net/wireless/rtlwifi/rtl8188ee/phy.h
drivers/net/wireless/rtlwifi/rtl8192ce/phy.h
drivers/net/wireless/rtlwifi/rtl8192ce/rf.h
drivers/net/wireless/rtlwifi/rtl8192cu/rf.h
drivers/net/wireless/rtlwifi/rtl8192de/hw.h
drivers/net/wireless/rtlwifi/rtl8192de/phy.h
drivers/net/wireless/rtlwifi/rtl8192de/rf.h
drivers/net/wireless/rtlwifi/rtl8723ae/phy.h
drivers/net/wireless/rtlwifi/rtl8723ae/rf.h
drivers/net/wireless/rtlwifi/wifi.h
drivers/net/xen-netback/netback.c
drivers/net/xen-netback/xenbus.c
drivers/net/xen-netfront.c
drivers/pci/pci-acpi.c
drivers/pci/pci.c
drivers/regulator/da9063-regulator.c
drivers/regulator/palmas-regulator.c
drivers/regulator/ti-abb-regulator.c
drivers/regulator/wm831x-ldo.c
drivers/regulator/wm8350-regulator.c
drivers/staging/comedi/Kconfig
drivers/staging/dgap/dgap_driver.c
drivers/staging/dgnc/dgnc_driver.c
drivers/staging/iio/Kconfig
drivers/staging/iio/light/isl29018.c
drivers/staging/iio/magnetometer/hmc5843.c
drivers/staging/iio/meter/ade7854-spi.c
drivers/staging/imx-drm/imx-drm-core.c
drivers/staging/line6/toneport.c
drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
drivers/staging/lustre/lustre/Kconfig
drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
drivers/staging/lustre/lustre/libcfs/workitem.c
drivers/staging/lustre/lustre/obdecho/echo_client.c
drivers/staging/lustre/lustre/ptlrpc/pinger.c
drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
drivers/staging/lustre/lustre/ptlrpc/service.c
drivers/staging/octeon-usb/cvmx-usb.c
drivers/staging/rtl8188eu/core/rtw_ieee80211.c
drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
drivers/staging/rtl8188eu/core/rtw_mp.c
drivers/staging/rtl8188eu/core/rtw_wlan_util.c
drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
drivers/staging/rtl8188eu/include/odm.h
drivers/staging/rtl8188eu/include/rtl8188e_hal.h
drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
drivers/staging/rtl8188eu/os_dep/usb_intf.c
drivers/staging/rtl8192u/r819xU_cmdpkt.c
drivers/staging/vt6656/card.c
drivers/staging/vt6656/iwctl.c
drivers/staging/vt6656/main_usb.c
drivers/staging/vt6656/rxtx.c
drivers/staging/xillybus/xillybus_core.c
drivers/staging/zram/zram_drv.c
drivers/tty/n_tty.c
drivers/tty/serial/pch_uart.c
drivers/tty/serial/serial-tegra.c
drivers/tty/tty_ioctl.c
drivers/usb/chipidea/Kconfig
drivers/usb/chipidea/ci_hdrc_imx.c
drivers/usb/chipidea/core.c
drivers/usb/chipidea/udc.c
drivers/usb/core/devio.c
drivers/usb/core/hub.c
drivers/usb/dwc3/Kconfig
drivers/usb/dwc3/dwc3-pci.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/cdc2.c
drivers/usb/gadget/dummy_hcd.c
drivers/usb/gadget/f_ecm.c
drivers/usb/gadget/f_eem.c
drivers/usb/gadget/f_fs.c
drivers/usb/gadget/f_mass_storage.c
drivers/usb/gadget/fotg210-udc.c
drivers/usb/gadget/fusb300_udc.c
drivers/usb/gadget/multi.c
drivers/usb/gadget/mv_u3d_core.c
drivers/usb/gadget/s3c-hsotg.c
drivers/usb/host/ehci-fsl.c
drivers/usb/host/ehci-grlib.c
drivers/usb/host/ehci-hcd.c
drivers/usb/host/ehci-mv.c
drivers/usb/host/ehci-octeon.c
drivers/usb/host/ehci-pci.c
drivers/usb/host/ehci-pmcmsp.c
drivers/usb/host/ehci-ppc-of.c
drivers/usb/host/ehci-ps3.c
drivers/usb/host/ehci-q.c
drivers/usb/host/ehci-sead3.c
drivers/usb/host/ehci-sh.c
drivers/usb/host/ehci-tilegx.c
drivers/usb/host/ehci-w90x900.c
drivers/usb/host/ehci-xilinx-of.c
drivers/usb/host/fsl-mph-dr-of.c
drivers/usb/host/imx21-hcd.c
drivers/usb/host/ohci-hcd.c
drivers/usb/host/ohci-q.c
drivers/usb/host/uhci-pci.c
drivers/usb/host/uhci-q.c
drivers/usb/host/xhci-hub.c
drivers/usb/host/xhci-mem.c
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/phy/phy-omap-usb3.c
drivers/usb/serial/Kconfig
drivers/usb/serial/pl2303.c
drivers/vhost/scsi.c
drivers/vhost/vhost.c
drivers/video/mmp/hw/mmp_ctrl.c
drivers/video/mxsfb.c
drivers/video/neofb.c
drivers/video/of_display_timing.c
drivers/video/omap2/displays-new/Kconfig
drivers/video/omap2/displays-new/connector-analog-tv.c
drivers/video/omap2/displays-new/connector-dvi.c
drivers/video/omap2/displays-new/connector-hdmi.c
drivers/video/omap2/dss/dispc.c
drivers/video/s3fb.c
drivers/xen/balloon.c
fs/afs/dir.c
fs/binfmt_elf.c
fs/bio-integrity.c
fs/bio.c
fs/btrfs/btrfs_inode.h
fs/btrfs/ctree.c
fs/btrfs/ctree.h
fs/btrfs/dev-replace.c
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/file.c
fs/btrfs/free-space-cache.c
fs/btrfs/free-space-cache.h
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/ordered-data.c
fs/btrfs/ordered-data.h
fs/btrfs/relocation.c
fs/btrfs/scrub.c
fs/btrfs/super.c
fs/btrfs/transaction.c
fs/btrfs/tree-log.c
fs/btrfs/volumes.c
fs/cachefiles/namei.c
fs/cachefiles/xattr.c
fs/nfs/dir.c
fs/nfs/nfs4file.c
fs/nfs/nfs4filelayoutdev.c
fs/nfs/nfs4proc.c
fs/nilfs2/page.c
fs/nilfs2/segment.c
fs/ocfs2/dcache.c
fs/ocfs2/super.c
fs/reiserfs/journal.c
fs/super.c
fs/sysv/super.c
fs/udf/ialloc.c
fs/udf/super.c
fs/udf/udf_sb.h
fs/xfs/xfs_buf_item.c
fs/xfs/xfs_da_btree.c
fs/xfs/xfs_fs.h
fs/xfs/xfs_icache.c
fs/xfs/xfs_log_recover.c
include/asm-generic/vtime.h
include/drm/drmP.h
include/linux/balloon_compaction.h
include/linux/bcma/bcma_driver_pci.h
include/linux/blkdev.h
include/linux/device-mapper.h
include/linux/etherdevice.h
include/linux/fcdevice.h
include/linux/fddidevice.h
include/linux/hippidevice.h
include/linux/hyperv.h
include/linux/inetdevice.h
include/linux/ipv6.h
include/linux/kernel.h
include/linux/memcontrol.h
include/linux/mutex.h
include/linux/net.h
include/linux/netdevice.h
include/linux/netfilter.h
include/linux/netfilter/nf_conntrack_common.h
include/linux/netfilter/nf_conntrack_h323.h
include/linux/netfilter/nf_conntrack_proto_gre.h
include/linux/netfilter/nf_conntrack_sip.h
include/linux/netfilter/nfnetlink.h
include/linux/netfilter/nfnetlink_acct.h
include/linux/netfilter/x_tables.h
include/linux/netfilter_bridge.h
include/linux/netfilter_ipv4.h
include/linux/netfilter_ipv6.h
include/linux/nfs_xdr.h
include/linux/of_irq.h
include/linux/regulator/driver.h
include/linux/skbuff.h
include/linux/smp.h
include/linux/ssb/ssb_driver_gige.h
include/linux/usb/usbnet.h
include/net/addrconf.h
include/net/bluetooth/hci.h
include/net/flow_keys.h
include/net/inet_hashtables.h
include/net/inet_sock.h
include/net/inet_timewait_sock.h
include/net/ip.h
include/net/ip6_fib.h
include/net/ip_vs.h
include/net/mrp.h
include/net/net_namespace.h
include/net/netfilter/nf_conntrack_synproxy.h
include/net/netns/ipv4.h
include/net/route.h
include/net/secure_seq.h
include/net/sock.h
include/net/xfrm.h
include/trace/events/block.h
include/trace/events/btrfs.h
include/uapi/asm-generic/socket.h
include/uapi/drm/radeon_drm.h
include/uapi/linux/can/bcm.h
include/uapi/linux/can/error.h
include/uapi/linux/can/gw.h
include/uapi/linux/can/netlink.h
include/uapi/linux/can/raw.h
include/uapi/linux/if_bonding.h
include/uapi/linux/perf_event.h
ipc/msg.c
ipc/sem.c
ipc/shm.c
ipc/util.c
ipc/util.h
kernel/audit.c
kernel/context_tracking.c
kernel/events/core.c
kernel/kmod.c
kernel/params.c
kernel/pid.c
kernel/reboot.c
kernel/sched/fair.c
kernel/watchdog.c
lib/hexdump.c
lib/kobject.c
lib/lockref.c
mm/bounce.c
mm/compaction.c
mm/hwpoison-inject.c
mm/madvise.c
mm/memcontrol.c
mm/memory-failure.c
mm/migrate.c
mm/mlock.c
mm/page_alloc.c
mm/vmscan.c
net/802/mrp.c
net/8021q/vlan.c
net/bluetooth/hci_core.c
net/bluetooth/hci_event.c
net/bluetooth/l2cap_core.c
net/bluetooth/rfcomm/tty.c
net/bridge/br_multicast.c
net/bridge/netfilter/ebt_among.c
net/core/dev.c
net/core/flow_dissector.c
net/core/neighbour.c
net/core/rtnetlink.c
net/core/secure_seq.c
net/core/sock.c
net/ethernet/eth.c
net/ipv4/af_inet.c
net/ipv4/fib_trie.c
net/ipv4/icmp.c
net/ipv4/igmp.c
net/ipv4/inet_connection_sock.c
net/ipv4/inet_diag.c
net/ipv4/inet_hashtables.c
net/ipv4/inet_timewait_sock.c
net/ipv4/ip_output.c
net/ipv4/ip_sockglue.c
net/ipv4/ip_tunnel.c
net/ipv4/ip_tunnel_core.c
net/ipv4/ip_vti.c
net/ipv4/netfilter/ipt_SYNPROXY.c
net/ipv4/ping.c
net/ipv4/raw.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_output.c
net/ipv4/udp.c
net/ipv4/xfrm4_mode_tunnel.c
net/ipv6/addrconf.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/mcast.c
net/ipv6/netfilter/ip6t_SYNPROXY.c
net/ipv6/raw.c
net/ipv6/route.c
net/ipv6/sit.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/lapb/lapb_timer.c
net/mac80211/trace.h
net/netfilter/ipvs/ip_vs_core.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/ipvs/ip_vs_est.c
net/netfilter/ipvs/ip_vs_lblc.c
net/netfilter/ipvs/ip_vs_lblcr.c
net/netfilter/ipvs/ip_vs_nq.c
net/netfilter/ipvs/ip_vs_sed.c
net/netfilter/ipvs/ip_vs_wlc.c
net/netfilter/nf_synproxy_core.c
net/openvswitch/vport-vxlan.c
net/sched/cls_basic.c
net/sched/em_meta.c
net/sched/sch_fq.c
net/sctp/socket.c
net/sunrpc/auth_gss/auth_gss.c
net/xfrm/xfrm_state.c
scripts/checkpatch.pl
security/apparmor/crypto.c
security/apparmor/include/policy.h
security/apparmor/policy.c
security/selinux/hooks.c
sound/core/compress_offload.c
sound/pci/hda/patch_cirrus.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
tools/lib/lk/debugfs.c
tools/perf/arch/x86/util/tsc.c
tools/perf/builtin-inject.c
tools/perf/builtin-kmem.c
tools/perf/builtin-report.c
tools/perf/builtin-script.c
tools/perf/builtin-trace.c
tools/perf/config/Makefile
tools/perf/config/feature-tests.mak
tools/perf/util/annotate.c
tools/perf/util/dwarf-aux.c
tools/perf/util/dwarf-aux.h
tools/perf/util/header.c
tools/perf/util/hist.c
tools/perf/util/machine.c
tools/perf/util/probe-finder.c
tools/perf/util/probe-finder.h
tools/perf/util/session.c
tools/perf/util/session.h
tools/perf/util/symbol-elf.c
tools/perf/util/trace-event-parse.c

diff --git a/CREDITS b/CREDITS
index 9416a9a8b95e6c4404c80ade376feff7b6df406a..0640e16504832e43c2d3b9bb82b6b5fe3f5bea48 100644 (file)
--- a/CREDITS
+++ b/CREDITS
@@ -2808,8 +2808,7 @@ S: Ottawa, Ontario
 S: Canada K2P 0X8
 
 N: Mikael Pettersson
-E: mikpe@it.uu.se
-W: http://user.it.uu.se/~mikpe/linux/
+E: mikpelinux@gmail.com
 D: Miscellaneous fixes
 
 N: Reed H. Petty
index d18ecd827c408d0fb42a8d8a7fc669ce88c95fc2..929d9904f74b7eb94bac71e81308b0bf335c3108 100644 (file)
@@ -6,6 +6,8 @@ capability.txt
        - Generic Block Device Capability (/sys/block/<device>/capability)
 cfq-iosched.txt
        - CFQ IO scheduler tunables
+cmdline-partition.txt
+       - how to specify block device partitions on kernel command line
 data-integrity.txt
        - Block data integrity
 deadline-iosched.txt
index 2bbf4cc40c3f7f92a02df9bd42b1bdabc8a93791..525b9f6d7fb49e4c9bbd5f7ea76833dcde640e59 100644 (file)
@@ -1,9 +1,9 @@
-Embedded device command line partition
+Embedded device command line partition parsing
 =====================================================================
 
-Read block device partition table from command line.
-The partition used for fixed block device (eMMC) embedded device.
-It is no MBR, save storage space. Bootloader can be easily accessed
+Support for reading the block device partition table from the command line.
+It is typically used for fixed block (eMMC) embedded devices.
+It has no MBR, so saves storage space. Bootloader can be easily accessed
 by absolute address of data on the block device.
 Users can easily change the partition.
 
index 6d1c0988cfc7c7a0466d9f0cc18cc7feacdb0d2b..c67b975c89063f51fa20ae563f601c7c6113fe08 100644 (file)
@@ -1,11 +1,11 @@
-* Samsung Exynos specific extensions to the Synopsis Designware Mobile
+* Samsung Exynos specific extensions to the Synopsys Designware Mobile
   Storage Host Controller
 
-The Synopsis designware mobile storage host controller is used to interface
+The Synopsys designware mobile storage host controller is used to interface
 a SoC with storage medium such as eMMC or SD/MMC cards. This file documents
-differences between the core Synopsis dw mshc controller properties described
-by synopsis-dw-mshc.txt and the properties used by the Samsung Exynos specific
-extensions to the Synopsis Designware Mobile Storage Host Controller.
+differences between the core Synopsys dw mshc controller properties described
+by synopsys-dw-mshc.txt and the properties used by the Samsung Exynos specific
+extensions to the Synopsys Designware Mobile Storage Host Controller.
 
 Required Properties:
 
index 8a3d91d47b6af2e3e99d6182e25f21007b129f94..c559f3f36309e57f1eccda86e926771047960cbb 100644 (file)
@@ -1,11 +1,11 @@
-* Rockchip specific extensions to the Synopsis Designware Mobile
+* Rockchip specific extensions to the Synopsys Designware Mobile
   Storage Host Controller
 
-The Synopsis designware mobile storage host controller is used to interface
+The Synopsys designware mobile storage host controller is used to interface
 a SoC with storage medium such as eMMC or SD/MMC cards. This file documents
-differences between the core Synopsis dw mshc controller properties described
-by synopsis-dw-mshc.txt and the properties used by the Rockchip specific
-extensions to the Synopsis Designware Mobile Storage Host Controller.
+differences between the core Synopsys dw mshc controller properties described
+by synopsys-dw-mshc.txt and the properties used by the Rockchip specific
+extensions to the Synopsys Designware Mobile Storage Host Controller.
 
 Required Properties:
 
diff --git a/Documentation/devicetree/bindings/mmc/synopsis-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/synopsis-dw-mshc.txt
deleted file mode 100644 (file)
index cdcebea..0000000
+++ /dev/null
@@ -1,107 +0,0 @@
-* Synopsis Designware Mobile Storage Host Controller
-
-The Synopsis designware mobile storage host controller is used to interface
-a SoC with storage medium such as eMMC or SD/MMC cards. This file documents
-differences between the core mmc properties described by mmc.txt and the
-properties used by the Synopsis Designware Mobile Storage Host Controller.
-
-Required Properties:
-
-* compatible: should be
-       - snps,dw-mshc: for controllers compliant with synopsis dw-mshc.
-* #address-cells: should be 1.
-* #size-cells: should be 0.
-
-# Slots: The slot specific information are contained within child-nodes with
-  each child-node representing a supported slot. There should be atleast one
-  child node representing a card slot. The name of the child node representing
-  the slot is recommended to be slot@n where n is the unique number of the slot
-  connnected to the controller. The following are optional properties which
-  can be included in the slot child node.
-
-       * reg: specifies the physical slot number. The valid values of this
-         property is 0 to (num-slots -1), where num-slots is the value
-         specified by the num-slots property.
-
-       * bus-width: as documented in mmc core bindings.
-
-       * wp-gpios: specifies the write protect gpio line. The format of the
-         gpio specifier depends on the gpio controller. If a GPIO is not used
-         for write-protect, this property is optional.
-
-       * disable-wp: If the wp-gpios property isn't present then (by default)
-         we'd assume that the write protect is hooked up directly to the
-         controller's special purpose write protect line (accessible via
-         the WRTPRT register).  However, it's possible that we simply don't
-         want write protect.  In that case specify 'disable-wp'.
-         NOTE: This property is not required for slots known to always
-         connect to eMMC or SDIO cards.
-
-Optional properties:
-
-* clocks: from common clock binding: handle to biu and ciu clocks for the
-  bus interface unit clock and the card interface unit clock.
-
-* clock-names: from common clock binding: Shall be "biu" and "ciu".
-  If the biu clock is missing we'll simply skip enabling it.  If the
-  ciu clock is missing we'll just assume that the clock is running at
-  clock-frequency.  It is an error to omit both the ciu clock and the
-  clock-frequency.
-
-* clock-frequency: should be the frequency (in Hz) of the ciu clock.  If this
-  is specified and the ciu clock is specified then we'll try to set the ciu
-  clock to this at probe time.
-
-* num-slots: specifies the number of slots supported by the controller.
-  The number of physical slots actually used could be equal or less than the
-  value specified by num-slots. If this property is not specified, the value
-  of num-slot property is assumed to be 1.
-
-* fifo-depth: The maximum size of the tx/rx fifo's. If this property is not
-  specified, the default value of the fifo size is determined from the
-  controller registers.
-
-* card-detect-delay: Delay in milli-seconds before detecting card after card
-  insert event. The default value is 0.
-
-* supports-highspeed: Enables support for high speed cards (up to 50MHz)
-
-* broken-cd: as documented in mmc core bindings.
-
-* vmmc-supply: The phandle to the regulator to use for vmmc.  If this is
-  specified we'll defer probe until we can find this regulator.
-
-Aliases:
-
-- All the MSHC controller nodes should be represented in the aliases node using
-  the following format 'mshc{n}' where n is a unique number for the alias.
-
-Example:
-
-The MSHC controller node can be split into two portions, SoC specific and
-board specific portions as listed below.
-
-       dwmmc0@12200000 {
-               compatible = "snps,dw-mshc";
-               clocks = <&clock 351>, <&clock 132>;
-               clock-names = "biu", "ciu";
-               reg = <0x12200000 0x1000>;
-               interrupts = <0 75 0>;
-               #address-cells = <1>;
-               #size-cells = <0>;
-       };
-
-       dwmmc0@12200000 {
-               clock-frequency = <400000000>;
-               num-slots = <1>;
-               supports-highspeed;
-               broken-cd;
-               fifo-depth = <0x80>;
-               card-detect-delay = <200>;
-               vmmc-supply = <&buck8>;
-
-               slot@0 {
-                       reg = <0>;
-                       bus-width = <8>;
-               };
-       };
diff --git a/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt
new file mode 100644 (file)
index 0000000..066a78b
--- /dev/null
@@ -0,0 +1,107 @@
+* Synopsys Designware Mobile Storage Host Controller
+
+The Synopsys designware mobile storage host controller is used to interface
+a SoC with storage medium such as eMMC or SD/MMC cards. This file documents
+differences between the core mmc properties described by mmc.txt and the
+properties used by the Synopsys Designware Mobile Storage Host Controller.
+
+Required Properties:
+
+* compatible: should be
+       - snps,dw-mshc: for controllers compliant with synopsys dw-mshc.
+* #address-cells: should be 1.
+* #size-cells: should be 0.
+
+# Slots: The slot specific information are contained within child-nodes with
+  each child-node representing a supported slot. There should be atleast one
+  child node representing a card slot. The name of the child node representing
+  the slot is recommended to be slot@n where n is the unique number of the slot
+  connnected to the controller. The following are optional properties which
+  can be included in the slot child node.
+
+       * reg: specifies the physical slot number. The valid values of this
+         property is 0 to (num-slots -1), where num-slots is the value
+         specified by the num-slots property.
+
+       * bus-width: as documented in mmc core bindings.
+
+       * wp-gpios: specifies the write protect gpio line. The format of the
+         gpio specifier depends on the gpio controller. If a GPIO is not used
+         for write-protect, this property is optional.
+
+       * disable-wp: If the wp-gpios property isn't present then (by default)
+         we'd assume that the write protect is hooked up directly to the
+         controller's special purpose write protect line (accessible via
+         the WRTPRT register).  However, it's possible that we simply don't
+         want write protect.  In that case specify 'disable-wp'.
+         NOTE: This property is not required for slots known to always
+         connect to eMMC or SDIO cards.
+
+Optional properties:
+
+* clocks: from common clock binding: handle to biu and ciu clocks for the
+  bus interface unit clock and the card interface unit clock.
+
+* clock-names: from common clock binding: Shall be "biu" and "ciu".
+  If the biu clock is missing we'll simply skip enabling it.  If the
+  ciu clock is missing we'll just assume that the clock is running at
+  clock-frequency.  It is an error to omit both the ciu clock and the
+  clock-frequency.
+
+* clock-frequency: should be the frequency (in Hz) of the ciu clock.  If this
+  is specified and the ciu clock is specified then we'll try to set the ciu
+  clock to this at probe time.
+
+* num-slots: specifies the number of slots supported by the controller.
+  The number of physical slots actually used could be equal or less than the
+  value specified by num-slots. If this property is not specified, the value
+  of num-slot property is assumed to be 1.
+
+* fifo-depth: The maximum size of the tx/rx fifo's. If this property is not
+  specified, the default value of the fifo size is determined from the
+  controller registers.
+
+* card-detect-delay: Delay in milli-seconds before detecting card after card
+  insert event. The default value is 0.
+
+* supports-highspeed: Enables support for high speed cards (up to 50MHz)
+
+* broken-cd: as documented in mmc core bindings.
+
+* vmmc-supply: The phandle to the regulator to use for vmmc.  If this is
+  specified we'll defer probe until we can find this regulator.
+
+Aliases:
+
+- All the MSHC controller nodes should be represented in the aliases node using
+  the following format 'mshc{n}' where n is a unique number for the alias.
+
+Example:
+
+The MSHC controller node can be split into two portions, SoC specific and
+board specific portions as listed below.
+
+       dwmmc0@12200000 {
+               compatible = "snps,dw-mshc";
+               clocks = <&clock 351>, <&clock 132>;
+               clock-names = "biu", "ciu";
+               reg = <0x12200000 0x1000>;
+               interrupts = <0 75 0>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+       };
+
+       dwmmc0@12200000 {
+               clock-frequency = <400000000>;
+               num-slots = <1>;
+               supports-highspeed;
+               broken-cd;
+               fifo-depth = <0x80>;
+               card-detect-delay = <200>;
+               vmmc-supply = <&buck8>;
+
+               slot@0 {
+                       reg = <0>;
+                       bus-width = <8>;
+               };
+       };
index 2c6be0377f55d0963970972c4d2b494e73f4507f..d2ea4605d0789dc8d11ff3e1fd8686a30431a43b 100644 (file)
@@ -86,6 +86,7 @@ General Properties:
 
 Clock Properties:
 
+  - fsl,cksel        Timer reference clock source.
   - fsl,tclk-period  Timer reference clock period in nanoseconds.
   - fsl,tmr-prsc     Prescaler, divides the output clock.
   - fsl,tmr-add      Frequency compensation value.
@@ -97,7 +98,7 @@ Clock Properties:
   clock. You must choose these carefully for the clock to work right.
   Here is how to figure good values:
 
-  TimerOsc     = system clock               MHz
+  TimerOsc     = selected reference clock   MHz
   tclk_period  = desired clock period       nanoseconds
   NominalFreq  = 1000 / tclk_period         MHz
   FreqDivRatio = TimerOsc / NominalFreq     (must be greater that 1.0)
@@ -114,6 +115,20 @@ Clock Properties:
   Pulse Per Second (PPS) signal, since this will be offered to the PPS
   subsystem to synchronize the Linux clock.
 
+  Reference clock source is determined by the value, which is holded
+  in CKSEL bits in TMR_CTRL register. "fsl,cksel" property keeps the
+  value, which will be directly written in those bits, that is why,
+  according to reference manual, the next clock sources can be used:
+
+  <0> - external high precision timer reference clock (TSEC_TMR_CLK
+        input is used for this purpose);
+  <1> - eTSEC system clock;
+  <2> - eTSEC1 transmit clock;
+  <3> - RTC clock input.
+
+  When this attribute is not used, eTSEC system clock will serve as
+  IEEE 1588 timer reference clock.
+
 Example:
 
        ptp_clock@24E00 {
@@ -121,6 +136,7 @@ Example:
                reg = <0x24E00 0xB0>;
                interrupts = <12 0x8 13 0x8>;
                interrupt-parent = < &ipic >;
+               fsl,cksel       = <1>;
                fsl,tclk-period = <10>;
                fsl,tmr-prsc    = <100>;
                fsl,tmr-add     = <0x999999A4>;
index eabcb4b5db6e6711b244ea9a35e7b4ff711c12ef..e216af356847c05ac4ab9f2e9a6ae887b1939f2a 100644 (file)
@@ -1,4 +1,4 @@
-* Synopsis Designware PCIe interface
+* Synopsys Designware PCIe interface
 
 Required properties:
 - compatible: should contain "snps,dw-pcie" to identify the
diff --git a/Documentation/devicetree/bindings/serial/qca,ar9330-uart.txt b/Documentation/devicetree/bindings/serial/qca,ar9330-uart.txt
new file mode 100644 (file)
index 0000000..c5e032c
--- /dev/null
@@ -0,0 +1,34 @@
+* Qualcomm Atheros AR9330 High-Speed UART
+
+Required properties:
+
+- compatible: Must be "qca,ar9330-uart"
+
+- reg: Specifies the physical base address of the controller and
+  the length of the memory mapped region.
+
+- interrupt-parent: The phandle for the interrupt controller that
+  services interrupts for this device.
+
+- interrupts: Specifies the interrupt source of the parent interrupt
+  controller. The format of the interrupt specifier depends on the
+  parent interrupt controller.
+
+Additional requirements:
+
+  Each UART port must have an alias correctly numbered in "aliases"
+  node.
+
+Example:
+
+       aliases {
+               serial0 = &uart0;
+       };
+
+       uart0: uart@18020000 {
+               compatible = "qca,ar9330-uart";
+               reg = <0x18020000 0x14>;
+
+               interrupt-parent = <&intc>;
+               interrupts = <3>;
+       };
diff --git a/Documentation/devicetree/bindings/tty/serial/qca,ar9330-uart.txt b/Documentation/devicetree/bindings/tty/serial/qca,ar9330-uart.txt
deleted file mode 100644 (file)
index c5e032c..0000000
+++ /dev/null
@@ -1,34 +0,0 @@
-* Qualcomm Atheros AR9330 High-Speed UART
-
-Required properties:
-
-- compatible: Must be "qca,ar9330-uart"
-
-- reg: Specifies the physical base address of the controller and
-  the length of the memory mapped region.
-
-- interrupt-parent: The phandle for the interrupt controller that
-  services interrupts for this device.
-
-- interrupts: Specifies the interrupt source of the parent interrupt
-  controller. The format of the interrupt specifier depends on the
-  parent interrupt controller.
-
-Additional requirements:
-
-  Each UART port must have an alias correctly numbered in "aliases"
-  node.
-
-Example:
-
-       aliases {
-               serial0 = &uart0;
-       };
-
-       uart0: uart@18020000 {
-               compatible = "qca,ar9330-uart";
-               reg = <0x18020000 0x14>;
-
-               interrupt-parent = <&intc>;
-               interrupts = <3>;
-       };
index 1a036cd972fb0c205109ed66b968c3d771f66418..fcbb736d55feb439c1152be71355d5ab79f8edd2 100644 (file)
@@ -480,6 +480,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        Format: <io>,<irq>,<mode>
                        See header of drivers/net/hamradio/baycom_ser_hdx.c.
 
+       blkdevparts=    Manual partition parsing of block device(s) for
+                       embedded devices based on command line input.
+                       See Documentation/block/cmdline-partition.txt
+
        boot_delay=     Milliseconds to delay each printk during boot.
                        Values larger than 10 seconds (10000) are changed to
                        no delay (0).
@@ -1357,7 +1361,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        pages. In the event, a node is too small to have both
                        kernelcore and Movable pages, kernelcore pages will
                        take priority and other nodes will have a larger number
-                       of kernelcore pages.  The Movable zone is used for the
+                       of Movable pages.  The Movable zone is used for the
                        allocation of pages that may be reclaimed or moved
                        by the page migration subsystem.  This means that
                        HugeTLB pages may not be allocated from this zone.
@@ -3485,6 +3489,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                                the unplug protocol
                        never -- do not unplug even if version check succeeds
 
+       xen_nopvspin    [X86,XEN]
+                       Disables the ticketlock slowpath using Xen PV
+                       optimizations.
+
        xirc2ps_cs=     [NET,PCMCIA]
                        Format:
                        <irq>,<irq_mask>,<io>,<full_duplex>,<do_sound>,<lockup_hack>[,<irq2>[,<irq3>[,<irq4>]]]
index 9b28e714831ae35fd0664debe4a1091825701024..3856ed2c45a9ffda4918ab3615af8dfc1862890f 100644 (file)
@@ -743,21 +743,16 @@ xmit_hash_policy
                protocol information to generate the hash.
 
                Uses XOR of hardware MAC addresses and IP addresses to
-               generate the hash.  The IPv4 formula is
+               generate the hash.  The formula is
 
-               (((source IP XOR dest IP) AND 0xffff) XOR
-                       ( source MAC XOR destination MAC ))
-                               modulo slave count
+               hash = source MAC XOR destination MAC
+               hash = hash XOR source IP XOR destination IP
+               hash = hash XOR (hash RSHIFT 16)
+               hash = hash XOR (hash RSHIFT 8)
+               And then hash is reduced modulo slave count.
 
-               The IPv6 formula is
-
-               hash = (source ip quad 2 XOR dest IP quad 2) XOR
-                      (source ip quad 3 XOR dest IP quad 3) XOR
-                      (source ip quad 4 XOR dest IP quad 4)
-
-               (((hash >> 24) XOR (hash >> 16) XOR (hash >> 8) XOR hash)
-                       XOR (source MAC XOR destination MAC))
-                               modulo slave count
+               If the protocol is IPv6 then the source and destination
+               addresses are first hashed using ipv6_addr_hash.
 
                This algorithm will place all traffic to a particular
                network peer on the same slave.  For non-IP traffic,
@@ -779,21 +774,16 @@ xmit_hash_policy
                slaves, although a single connection will not span
                multiple slaves.
 
-               The formula for unfragmented IPv4 TCP and UDP packets is
-
-               ((source port XOR dest port) XOR
-                        ((source IP XOR dest IP) AND 0xffff)
-                               modulo slave count
+               The formula for unfragmented TCP and UDP packets is
 
-               The formula for unfragmented IPv6 TCP and UDP packets is
+               hash = source port, destination port (as in the header)
+               hash = hash XOR source IP XOR destination IP
+               hash = hash XOR (hash RSHIFT 16)
+               hash = hash XOR (hash RSHIFT 8)
+               And then hash is reduced modulo slave count.
 
-               hash = (source port XOR dest port) XOR
-                      ((source ip quad 2 XOR dest IP quad 2) XOR
-                       (source ip quad 3 XOR dest IP quad 3) XOR
-                       (source ip quad 4 XOR dest IP quad 4))
-
-               ((hash >> 24) XOR (hash >> 16) XOR (hash >> 8) XOR hash)
-                       modulo slave count
+               If the protocol is IPv6 then the source and destination
+               addresses are first hashed using ipv6_addr_hash.
 
                For fragmented TCP or UDP packets and all other IPv4 and
                IPv6 protocol traffic, the source and destination port
@@ -801,10 +791,6 @@ xmit_hash_policy
                formula is the same as for the layer2 transmit hash
                policy.
 
-               The IPv4 policy is intended to mimic the behavior of
-               certain switches, notably Cisco switches with PFC2 as
-               well as some Foundry and IBM products.
-
                This algorithm is not fully 802.3ad compliant.  A
                single TCP or UDP conversation containing both
                fragmented and unfragmented packets will see packets
@@ -815,6 +801,26 @@ xmit_hash_policy
                conversations.  Other implementations of 802.3ad may
                or may not tolerate this noncompliance.
 
+       encap2+3
+
+               This policy uses the same formula as layer2+3 but it
+               relies on skb_flow_dissect to obtain the header fields
+               which might result in the use of inner headers if an
+               encapsulation protocol is used. For example this will
+               improve the performance for tunnel users because the
+               packets will be distributed according to the encapsulated
+               flows.
+
+       encap3+4
+
+               This policy uses the same formula as layer3+4 but it
+               relies on skb_flow_dissect to obtain the header fields
+               which might result in the use of inner headers if an
+               encapsulation protocol is used. For example this will
+               improve the performance for tunnel users because the
+               packets will be distributed according to the encapsulated
+               flows.
+
        The default value is layer2.  This option was added in bonding
        version 2.6.3.  In earlier versions of bonding, this parameter
        does not exist, and the layer2 policy is the only policy.  The
index a46ddb85e83a0dcdf0f2a54fd9b745eadb31cb6f..f911e3656209f6c1bdc49a156f322b64dfd3a824 100644 (file)
@@ -296,6 +296,12 @@ Cirrus Logic CS4206/4207
   imac27       IMac 27 Inch
   auto         BIOS setup (default)
 
+Cirrus Logic CS4208
+===================
+  mba6         MacBook Air 6,1 and 6,2
+  gpio0                Enable GPIO 0 amp
+  auto         BIOS setup (default)
+
 VIA VT17xx/VT18xx/VT20xx
 ========================
   auto         BIOS setup (default)
index e61c2e83fc2b3b1f7570cff0cc6dc12d87aa8059..744a23954a349e88a2124ddd18efd24972523d8b 100644 (file)
@@ -1812,7 +1812,8 @@ S:        Supported
 F:     drivers/net/ethernet/broadcom/bnx2x/
 
 BROADCOM BCM281XX/BCM11XXX ARM ARCHITECTURE
-M:     Christian Daudt <csd@broadcom.com>
+M:     Christian Daudt <bcm@fixthebug.org>
+L:     bcm-kernel-feedback-list@broadcom.com
 T:     git git://git.github.com/broadcom/bcm11351
 S:     Maintained
 F:     arch/arm/mach-bcm/
@@ -2639,6 +2640,18 @@ F:       include/linux/device-mapper.h
 F:     include/linux/dm-*.h
 F:     include/uapi/linux/dm-*.h
 
+DIGI NEO AND CLASSIC PCI PRODUCTS
+M:     Lidza Louina <lidza.louina@gmail.com>
+L:     driverdev-devel@linuxdriverproject.org
+S:     Maintained
+F:     drivers/staging/dgnc/
+
+DIGI EPCA PCI PRODUCTS
+M:     Lidza Louina <lidza.louina@gmail.com>
+L:     driverdev-devel@linuxdriverproject.org
+S:     Maintained
+F:     drivers/staging/dgap/
+
 DIOLAN U2C-12 I2C DRIVER
 M:     Guenter Roeck <linux@roeck-us.net>
 L:     linux-i2c@vger.kernel.org
@@ -6595,7 +6608,7 @@ S:        Obsolete
 F:     drivers/net/wireless/prism54/
 
 PROMISE SATA TX2/TX4 CONTROLLER LIBATA DRIVER
-M:     Mikael Pettersson <mikpe@it.uu.se>
+M:     Mikael Pettersson <mikpelinux@gmail.com>
 L:     linux-ide@vger.kernel.org
 S:     Maintained
 F:     drivers/ata/sata_promise.*
@@ -7258,9 +7271,9 @@ F:        include/linux/sched.h
 F:     include/uapi/linux/sched.h
 
 SCORE ARCHITECTURE
-M:     Chen Liqin <liqin.chen@sunplusct.com>
+M:     Chen Liqin <liqin.linux@gmail.com>
 M:     Lennox Wu <lennox.wu@gmail.com>
-W:     http://www.sunplusct.com
+W:     http://www.sunplus.com
 S:     Supported
 F:     arch/score/
 
@@ -8724,9 +8737,8 @@ F:        Documentation/hid/hiddev.txt
 F:     drivers/hid/usbhid/
 
 USB/IP DRIVERS
-M:     Matt Mooney <mfm@muteddisk.com>
 L:     linux-usb@vger.kernel.org
-S:     Maintained
+S:     Orphan
 F:     drivers/staging/usbip/
 
 USB ISP116X DRIVER
@@ -9366,6 +9378,7 @@ F:        arch/arm64/include/asm/xen/
 
 XEN NETWORK BACKEND DRIVER
 M:     Ian Campbell <ian.campbell@citrix.com>
+M:     Wei Liu <wei.liu2@citrix.com>
 L:     xen-devel@lists.xenproject.org (moderated for non-subscribers)
 L:     netdev@vger.kernel.org
 S:     Supported
index de004ceb6b5e32d65e4b21c77855806bf709bbfc..2ae108d4f2af36c2444d296d2413736c1ca08cc9 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 12
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc3
 NAME = One Giant Leap for Frogkind
 
 # *DOCUMENTATION*
index 1feb169274fe613cc5f0360c797668bf84497c4b..af2cc6eabcc781c4e8f7ee2067de75844ed5874d 100644 (file)
@@ -286,9 +286,6 @@ config HAVE_PERF_USER_STACK_DUMP
 config HAVE_ARCH_JUMP_LABEL
        bool
 
-config HAVE_ARCH_MUTEX_CPU_RELAX
-       bool
-
 config HAVE_RCU_TABLE_FREE
        bool
 
index 467de010ea7ee130cdbda0b532f10c885f8d1308..e3a1491d5073a0b59a6b9eb207c4a684ae6c9495 100644 (file)
@@ -81,6 +81,8 @@
 
 #define SO_SELECT_ERR_QUEUE    45
 
-#define SO_BUSY_POLL                   46
+#define SO_BUSY_POLL           46
+
+#define SO_MAX_PACING_RATE     47
 
 #endif /* _UAPI_ASM_SOCKET_H */
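
SO_MAX_PACING_RATE, added here and in the other per-arch socket.h hunks below,
caps a socket's transmit pacing rate in bytes per second. A minimal userspace
sketch, assuming the toolchain headers do not yet define the constant (47 is
the generic value; sparc and parisc use different numbers, so real code should
take the value from the system headers):

    /* Minimal sketch, not taken from the patches: cap a socket's pacing rate.
     * The fd argument, the 1 MB/s figure and the fallback #define are assumptions. */
    #include <stdio.h>
    #include <sys/socket.h>

    #ifndef SO_MAX_PACING_RATE
    #define SO_MAX_PACING_RATE 47           /* generic value from the hunk above */
    #endif

    static int cap_pacing(int fd)
    {
            unsigned int rate = 1000000;    /* bytes per second */

            if (setsockopt(fd, SOL_SOCKET, SO_MAX_PACING_RATE,
                           &rate, sizeof(rate)) < 0) {
                    perror("setsockopt(SO_MAX_PACING_RATE)");
                    return -1;
            }
            return 0;
    }
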
index f158197ac5b04432ac6beb37c9175629361c0901..b6a8c2dfbe6e42cd51def893784f0780bc67264e 100644 (file)
@@ -45,7 +45,14 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
-       lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
+       unsigned int tmp = __ARCH_SPIN_LOCK_UNLOCKED__;
+
+       __asm__ __volatile__(
+       "       ex  %0, [%1]            \n"
+       : "+r" (tmp)
+       : "r"(&(lock->slock))
+       : "memory");
+
        smp_mb();
 }
 
index 32420824375b351083da3e8686cc0013f0914871..30c9baffa96f1f3a5cab5d6ec6fe83b9f4e86318 100644 (file)
@@ -43,7 +43,7 @@
  * Because it essentially checks if buffer end is within limit and @len is
  * non-ngeative, which implies that buffer start will be within limit too.
  *
- * The reason for rewriting being, for majorit yof cases, @len is generally
+ * The reason for rewriting being, for majoritof cases, @len is generally
  * compile time constant, causing first sub-expression to be compile time
  * subsumed.
  *
@@ -53,7 +53,7 @@
  *
  */
 #define __user_ok(addr, sz)    (((sz) <= TASK_SIZE) && \
-                                (((addr)+(sz)) <= get_fs()))
+                                ((addr) <= (get_fs() - (sz))))
 #define __access_ok(addr, sz)  (unlikely(__kernel_ok) || \
                                 likely(__user_ok((addr), (sz))))
 
index 0e51e69cf30d772b646ef8a1d2c5605bf63b929a..3fde7de3ea670351ac69e0f441f35f776ebbb7d8 100644 (file)
@@ -227,12 +227,9 @@ void __attribute__((weak)) arc_local_timer_setup(unsigned int cpu)
 {
        struct clock_event_device *clk = &per_cpu(arc_clockevent_device, cpu);
 
-       clockevents_calc_mult_shift(clk, arc_get_core_freq(), 5);
-
-       clk->max_delta_ns = clockevent_delta2ns(ARC_TIMER_MAX, clk);
        clk->cpumask = cpumask_of(cpu);
-
-       clockevents_register_device(clk);
+       clockevents_config_and_register(clk, arc_get_core_freq(),
+                                       0, ARC_TIMER_MAX);
 
        /*
         * setup the per-cpu timer IRQ handler - for all cpus
index 28d1700607474eb01be14e0600832bf7ee4cf999..7ff5b5c183bb026716295c13f7b123de1d67a96f 100644 (file)
@@ -245,6 +245,12 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
                regs->status32 &= ~STATUS_DE_MASK;
        } else {
                regs->ret += state.instr_len;
+
+               /* handle zero-overhead-loop */
+               if ((regs->ret == regs->lp_end) && (regs->lp_count)) {
+                       regs->ret = regs->lp_start;
+                       regs->lp_count--;
+               }
        }
 
        return 0;
index 3f7714d8d2d216bf3bbd7b4a5b227ea982997554..1ad6fb6c094db415ec76a72a28356e75bdfd7d17 100644 (file)
@@ -2217,8 +2217,7 @@ config NEON
 
 config KERNEL_MODE_NEON
        bool "Support for NEON in kernel mode"
-       default n
-       depends on NEON
+       depends on NEON && AEABI
        help
          Say Y to include support for NEON in kernel mode.
 
index 19d6cd6f29f98b95962cca5643dca61debdf7b8e..3a14ea8fe97e5cac183ad8c9148b816e473b4ae5 100644 (file)
@@ -148,7 +148,7 @@ AES_Te:
 @               const AES_KEY *key) {
 .align 5
 ENTRY(AES_encrypt)
-       sub     r3,pc,#8                @ AES_encrypt
+       adr     r3,AES_encrypt
        stmdb   sp!,{r1,r4-r12,lr}
        mov     r12,r0          @ inp
        mov     r11,r2
@@ -381,7 +381,7 @@ _armv4_AES_encrypt:
 .align 5
 ENTRY(private_AES_set_encrypt_key)
 _armv4_AES_set_encrypt_key:
-       sub     r3,pc,#8                @ AES_set_encrypt_key
+       adr     r3,_armv4_AES_set_encrypt_key
        teq     r0,#0
        moveq   r0,#-1
        beq     .Labrt
@@ -843,7 +843,7 @@ AES_Td:
 @               const AES_KEY *key) {
 .align 5
 ENTRY(AES_decrypt)
-       sub     r3,pc,#8                @ AES_decrypt
+       adr     r3,AES_decrypt
        stmdb   sp!,{r1,r4-r12,lr}
        mov     r12,r0          @ inp
        mov     r11,r2
index 7e1f76027f666e252c35bd320d4518e110548c47..72abdc541f38f6e892a24050d1992fc37098f5e3 100644 (file)
 #include <asm/unified.h>
 #include <asm/compiler.h>
 
+#if __LINUX_ARM_ARCH__ < 6
+#include <asm-generic/uaccess-unaligned.h>
+#else
+#define __get_user_unaligned __get_user
+#define __put_user_unaligned __put_user
+#endif
+
 #define VERIFY_READ 0
 #define VERIFY_WRITE 1
 
index 74ad15d1a065fba97aac1d4ac43ff33a9e994470..bc6bd9683ba4555d9713e1693b258e6204fc90a5 100644 (file)
@@ -442,10 +442,10 @@ local_restart:
        ldrcc   pc, [tbl, scno, lsl #2]         @ call sys_* routine
 
        add     r1, sp, #S_OFF
-       cmp     scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
+2:     cmp     scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
        eor     r0, scno, #__NR_SYSCALL_BASE    @ put OS number back
        bcs     arm_syscall
-2:     mov     why, #0                         @ no longer a real syscall
+       mov     why, #0                         @ no longer a real syscall
        b       sys_ni_syscall                  @ not private func
 
 #if defined(CONFIG_OABI_COMPAT) || !defined(CONFIG_AEABI)
index de23a9beed1333d86d1143a7b31613ff7376440d..39f89fbd5111ee9f0a96d6712e1f0f2ba308147d 100644 (file)
 #ifdef CONFIG_CONTEXT_TRACKING
        .if     \save
        stmdb   sp!, {r0-r3, ip, lr}
-       bl      user_exit
+       bl      context_tracking_user_exit
        ldmia   sp!, {r0-r3, ip, lr}
        .else
-       bl      user_exit
+       bl      context_tracking_user_exit
        .endif
 #endif
        .endm
 #ifdef CONFIG_CONTEXT_TRACKING
        .if     \save
        stmdb   sp!, {r0-r3, ip, lr}
-       bl      user_enter
+       bl      context_tracking_user_enter
        ldmia   sp!, {r0-r3, ip, lr}
        .else
-       bl      user_enter
+       bl      context_tracking_user_enter
        .endif
 #endif
        .endm
index 71e08baee209387f899e14a32fa32e02682b0ae8..c02ba4af599f417113fdb2c260270ae7162575e6 100644 (file)
@@ -58,14 +58,14 @@ static const struct kvm_irq_level a15_vtimer_irq = {
  */
 int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 {
-       struct kvm_regs *cpu_reset;
+       struct kvm_regs *reset_regs;
        const struct kvm_irq_level *cpu_vtimer_irq;
 
        switch (vcpu->arch.target) {
        case KVM_ARM_TARGET_CORTEX_A15:
                if (vcpu->vcpu_id > a15_max_cpu_idx)
                        return -EINVAL;
-               cpu_reset = &a15_regs_reset;
+               reset_regs = &a15_regs_reset;
                vcpu->arch.midr = read_cpuid_id();
                cpu_vtimer_irq = &a15_vtimer_irq;
                break;
@@ -74,7 +74,7 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
        }
 
        /* Reset core registers */
-       memcpy(&vcpu->arch.regs, cpu_reset, sizeof(vcpu->arch.regs));
+       memcpy(&vcpu->arch.regs, reset_regs, sizeof(vcpu->arch.regs));
 
        /* Reset CP15 registers */
        kvm_reset_coprocs(vcpu);
index c3cfa4116dc09ffe22f2227c47130be0d900945e..c6b40f3867863c7de1a52e5a1ab72acdc23f758f 100644 (file)
@@ -285,7 +285,7 @@ int __init mx27_clocks_init(unsigned long fref)
        clk_register_clkdev(clk[ata_ahb_gate], "ata", NULL);
        clk_register_clkdev(clk[rtc_ipg_gate], NULL, "imx21-rtc");
        clk_register_clkdev(clk[scc_ipg_gate], "scc", NULL);
-       clk_register_clkdev(clk[cpu_div], NULL, "cpufreq-cpu0.0");
+       clk_register_clkdev(clk[cpu_div], NULL, "cpu0");
        clk_register_clkdev(clk[emi_ahb_gate], "emi_ahb" , NULL);
 
        mxc_timer_init(MX27_IO_ADDRESS(MX27_GPT1_BASE_ADDR), MX27_INT_GPT1);
index d9094b9a51857d8b73eb21e8dc76002ead2cf55f..7c0dc4540aa4785270784e3f62ce2b643fd95664 100644 (file)
@@ -328,7 +328,7 @@ static void __init mx5_clocks_common_init(unsigned long rate_ckil,
        clk_register_clkdev(clk[ssi2_ipg_gate], NULL, "imx-ssi.1");
        clk_register_clkdev(clk[ssi3_ipg_gate], NULL, "imx-ssi.2");
        clk_register_clkdev(clk[sdma_gate], NULL, "imx35-sdma");
-       clk_register_clkdev(clk[cpu_podf], NULL, "cpufreq-cpu0.0");
+       clk_register_clkdev(clk[cpu_podf], NULL, "cpu0");
        clk_register_clkdev(clk[iim_gate], "iim", NULL);
        clk_register_clkdev(clk[dummy], NULL, "imx2-wdt.0");
        clk_register_clkdev(clk[dummy], NULL, "imx2-wdt.1");
index 85a1b51346c8db12845d3123dd7758af034e3357..90372a21087f9ef38535479ccc35aac9e37977dc 100644 (file)
@@ -233,10 +233,15 @@ put_node:
        of_node_put(np);
 }
 
-static void __init imx6q_opp_init(struct device *cpu_dev)
+static void __init imx6q_opp_init(void)
 {
        struct device_node *np;
+       struct device *cpu_dev = get_cpu_device(0);
 
+       if (!cpu_dev) {
+               pr_warn("failed to get cpu0 device\n");
+               return;
+       }
        np = of_node_get(cpu_dev->of_node);
        if (!np) {
                pr_warn("failed to find cpu0 node\n");
@@ -268,7 +273,7 @@ static void __init imx6q_init_late(void)
                imx6q_cpuidle_init();
 
        if (IS_ENABLED(CONFIG_ARM_IMX6Q_CPUFREQ)) {
-               imx6q_opp_init(&imx6q_cpufreq_pdev.dev);
+               imx6q_opp_init();
                platform_device_register(&imx6q_cpufreq_pdev);
        }
 }
index 8ea5ef6c79ccbed859318cc69745488c0d273290..5bd2e851e3c7f2d03cd7ae4dd647ec1cc7d9c537 100644 (file)
@@ -555,7 +555,7 @@ static struct clk_lookup lookups[] = {
        CLKDEV_CON_ID("pll2h",                  &pll2h_clk),
 
        /* CPU clock */
-       CLKDEV_DEV_ID("cpufreq-cpu0",           &z_clk),
+       CLKDEV_DEV_ID("cpu0",                   &z_clk),
 
        /* DIV6 */
        CLKDEV_CON_ID("zb",                     &div6_clks[DIV6_ZB]),
index 1942eaef518134804110ed14cc5a61c94550af27..c92c023f0d27c1de8778665e372d1abfba82db42 100644 (file)
@@ -616,7 +616,7 @@ static struct clk_lookup lookups[] = {
        CLKDEV_DEV_ID("smp_twd", &twd_clk), /* smp_twd */
 
        /* DIV4 clocks */
-       CLKDEV_DEV_ID("cpufreq-cpu0", &div4_clks[DIV4_Z]),
+       CLKDEV_DEV_ID("cpu0", &div4_clks[DIV4_Z]),
 
        /* DIV6 clocks */
        CLKDEV_CON_ID("vck1_clk", &div6_clks[DIV6_VCK1]),
index d22af851f3f638b09fdc4394a05e2cebf8a10201..fd7980743890b2ebcf5ff9b5ca40fc4ef83de220 100644 (file)
@@ -1,5 +1,19 @@
 
 generic-y      += clkdev.h
+generic-y       += cputime.h
+generic-y       += delay.h
+generic-y       += device.h
+generic-y       += div64.h
+generic-y       += emergency-restart.h
 generic-y      += exec.h
-generic-y      += trace_clock.h
+generic-y       += futex.h
+generic-y       += irq_regs.h
 generic-y      += param.h
+generic-y       += local.h
+generic-y       += local64.h
+generic-y       += percpu.h
+generic-y       += scatterlist.h
+generic-y       += sections.h
+generic-y       += topology.h
+generic-y      += trace_clock.h
+generic-y       += xor.h
diff --git a/arch/avr32/include/asm/cputime.h b/arch/avr32/include/asm/cputime.h
deleted file mode 100644 (file)
index e87e0f8..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_AVR32_CPUTIME_H
-#define __ASM_AVR32_CPUTIME_H
-
-#include <asm-generic/cputime.h>
-
-#endif /* __ASM_AVR32_CPUTIME_H */
diff --git a/arch/avr32/include/asm/delay.h b/arch/avr32/include/asm/delay.h
deleted file mode 100644 (file)
index 9670e12..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/delay.h>
diff --git a/arch/avr32/include/asm/device.h b/arch/avr32/include/asm/device.h
deleted file mode 100644 (file)
index d8f9872..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-/*
- * Arch specific extensions to struct device
- *
- * This file is released under the GPLv2
- */
-#include <asm-generic/device.h>
-
diff --git a/arch/avr32/include/asm/div64.h b/arch/avr32/include/asm/div64.h
deleted file mode 100644 (file)
index d7ddd4f..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_AVR32_DIV64_H
-#define __ASM_AVR32_DIV64_H
-
-#include <asm-generic/div64.h>
-
-#endif /* __ASM_AVR32_DIV64_H */
diff --git a/arch/avr32/include/asm/emergency-restart.h b/arch/avr32/include/asm/emergency-restart.h
deleted file mode 100644 (file)
index 3e7e014..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_AVR32_EMERGENCY_RESTART_H
-#define __ASM_AVR32_EMERGENCY_RESTART_H
-
-#include <asm-generic/emergency-restart.h>
-
-#endif /* __ASM_AVR32_EMERGENCY_RESTART_H */
diff --git a/arch/avr32/include/asm/futex.h b/arch/avr32/include/asm/futex.h
deleted file mode 100644 (file)
index 10419f1..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_AVR32_FUTEX_H
-#define __ASM_AVR32_FUTEX_H
-
-#include <asm-generic/futex.h>
-
-#endif /* __ASM_AVR32_FUTEX_H */
diff --git a/arch/avr32/include/asm/irq_regs.h b/arch/avr32/include/asm/irq_regs.h
deleted file mode 100644 (file)
index 3dd9c0b..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/irq_regs.h>
diff --git a/arch/avr32/include/asm/local.h b/arch/avr32/include/asm/local.h
deleted file mode 100644 (file)
index 1c16196..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_AVR32_LOCAL_H
-#define __ASM_AVR32_LOCAL_H
-
-#include <asm-generic/local.h>
-
-#endif /* __ASM_AVR32_LOCAL_H */
diff --git a/arch/avr32/include/asm/local64.h b/arch/avr32/include/asm/local64.h
deleted file mode 100644 (file)
index 36c93b5..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/local64.h>
diff --git a/arch/avr32/include/asm/percpu.h b/arch/avr32/include/asm/percpu.h
deleted file mode 100644 (file)
index 69227b4..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_AVR32_PERCPU_H
-#define __ASM_AVR32_PERCPU_H
-
-#include <asm-generic/percpu.h>
-
-#endif /* __ASM_AVR32_PERCPU_H */
diff --git a/arch/avr32/include/asm/scatterlist.h b/arch/avr32/include/asm/scatterlist.h
deleted file mode 100644 (file)
index a5902d9..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_AVR32_SCATTERLIST_H
-#define __ASM_AVR32_SCATTERLIST_H
-
-#include <asm-generic/scatterlist.h>
-
-#endif /* __ASM_AVR32_SCATTERLIST_H */
diff --git a/arch/avr32/include/asm/sections.h b/arch/avr32/include/asm/sections.h
deleted file mode 100644 (file)
index aa14252..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_AVR32_SECTIONS_H
-#define __ASM_AVR32_SECTIONS_H
-
-#include <asm-generic/sections.h>
-
-#endif /* __ASM_AVR32_SECTIONS_H */
diff --git a/arch/avr32/include/asm/topology.h b/arch/avr32/include/asm/topology.h
deleted file mode 100644 (file)
index 5b766cb..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_AVR32_TOPOLOGY_H
-#define __ASM_AVR32_TOPOLOGY_H
-
-#include <asm-generic/topology.h>
-
-#endif /* __ASM_AVR32_TOPOLOGY_H */
diff --git a/arch/avr32/include/asm/xor.h b/arch/avr32/include/asm/xor.h
deleted file mode 100644 (file)
index 99c87aa..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_XOR_H
-#define _ASM_XOR_H
-
-#include <asm-generic/xor.h>
-
-#endif
index 11c4259c62fb146b8139b7045958f68d6adc3765..4399364214349674999c872ca4779c7089c22160 100644 (file)
@@ -76,4 +76,6 @@
 
 #define SO_BUSY_POLL           46
 
+#define SO_MAX_PACING_RATE     47
+
 #endif /* __ASM_AVR32_SOCKET_H */
index c2731003edef556c18cd137f4a0b1aac5e770cc3..42a53e740a7ee1c93044a01742e93250dc919346 100644 (file)
@@ -289,7 +289,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
                memset(childregs, 0, sizeof(struct pt_regs));
                p->thread.cpu_context.r0 = arg;
                p->thread.cpu_context.r1 = usp; /* fn */
-               p->thread.cpu_context.r2 = syscall_return;
+               p->thread.cpu_context.r2 = (unsigned long)syscall_return;
                p->thread.cpu_context.pc = (unsigned long)ret_from_kernel_thread;
                childregs->sr = MODE_SUPERVISOR;
        } else {
index 869a1c6ffeee944a64608d996877a78ee3499245..12f828ad5058d09158f8d3e2007b76a7a42cbb4a 100644 (file)
@@ -98,7 +98,14 @@ static void comparator_mode(enum clock_event_mode mode,
        case CLOCK_EVT_MODE_SHUTDOWN:
                sysreg_write(COMPARE, 0);
                pr_debug("%s: stop\n", evdev->name);
-               cpu_idle_poll_ctrl(false);
+               if (evdev->mode == CLOCK_EVT_MODE_ONESHOT ||
+                   evdev->mode == CLOCK_EVT_MODE_RESUME) {
+                       /*
+                        * Only disable idle poll if we have forced that
+                        * in a previous call.
+                        */
+                       cpu_idle_poll_ctrl(false);
+               }
                break;
        default:
                BUG();
index eb723e51554e559ef73d1196a455a96583a5cf51..13829aaaeec565b8e53726e95fcf4e7fde638c25 100644 (file)
@@ -78,6 +78,8 @@
 
 #define SO_BUSY_POLL           46
 
+#define SO_MAX_PACING_RATE     47
+
 #endif /* _ASM_SOCKET_H */
 
 
index f0cb1c3411638e7b33bee915739881635c59c9bc..5d4299762426b108057a33faf71b85efdfe4f74b 100644 (file)
@@ -76,5 +76,7 @@
 
 #define SO_BUSY_POLL           46
 
+#define SO_MAX_PACING_RATE     47
+
 #endif /* _ASM_SOCKET_H */
 
index 9490758c5e2bac1aa4375063a713473221234d97..214ccaf3554a5e205d7a7a0e182de4748922d12f 100644 (file)
@@ -76,4 +76,6 @@
 
 #define SO_BUSY_POLL           46
 
+#define SO_MAX_PACING_RATE     47
+
 #endif /* _ASM_SOCKET_H */
index 556d0701a155351e844960aa00b51c55c820a3b9..c25302fb48d95636b59670efa423a59ff2d055c0 100644 (file)
@@ -85,4 +85,6 @@
 
 #define SO_BUSY_POLL           46
 
+#define SO_MAX_PACING_RATE     47
+
 #endif /* _ASM_IA64_SOCKET_H */
index 24be7c8da86ad3cbbdee6be0933fe302c9adaf02..52966650114f3198df49c109c02d70ae82d8adcb 100644 (file)
@@ -76,4 +76,6 @@
 
 #define SO_BUSY_POLL           46
 
+#define SO_MAX_PACING_RATE     47
+
 #endif /* _ASM_M32R_SOCKET_H */
index 51680d15ca8ec2b2971365573506b39f473170aa..d445d060e346ab689a177bf584cfebd5b9dfca51 100644 (file)
 
 /*
  * MIPS32, MIPS64, VR5500, IDT32332, IDT32334 and maybe a few other
- * pre-MIPS32/MIPS53 processors have CLO, CLZ. The IDT RC64574 is 64-bit and
+ * pre-MIPS32/MIPS64 processors have CLO, CLZ. The IDT RC64574 is 64-bit and
  * has CLO and CLZ but not DCLO nor DCLZ.  For 64-bit kernels
  * cpu_has_clo_clz also indicates the availability of DCLO and DCLZ.
  */
index 61c01f054d1b160f753582b60888e59592b95b21..0df9787cd84d9e4a93e04513f1654b0327b3df0f 100644 (file)
@@ -94,4 +94,6 @@
 
 #define SO_BUSY_POLL           46
 
+#define SO_MAX_PACING_RATE     47
+
 #endif /* _UAPI_ASM_SOCKET_H */
index f25a7e9f8cbc56e320df0c6c51cc7bf8a45cb40e..5f8b955125801935f33370559476ca93f640c4df 100644 (file)
@@ -308,12 +308,10 @@ static void mips_dma_sync_sg_for_cpu(struct device *dev,
 {
        int i;
 
-       /* Make sure that gcc doesn't leave the empty loop body.  */
-       for (i = 0; i < nelems; i++, sg++) {
-               if (cpu_needs_post_dma_flush(dev))
+       if (cpu_needs_post_dma_flush(dev))
+               for (i = 0; i < nelems; i++, sg++)
                        __dma_sync(sg_page(sg), sg->offset, sg->length,
                                   direction);
-       }
 }
 
 static void mips_dma_sync_sg_for_device(struct device *dev,
@@ -321,12 +319,10 @@ static void mips_dma_sync_sg_for_device(struct device *dev,
 {
        int i;
 
-       /* Make sure that gcc doesn't leave the empty loop body.  */
-       for (i = 0; i < nelems; i++, sg++) {
-               if (!plat_device_is_coherent(dev))
+       if (!plat_device_is_coherent(dev))
+               for (i = 0; i < nelems; i++, sg++)
                        __dma_sync(sg_page(sg), sg->offset, sg->length,
                                   direction);
-       }
 }
 
 int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
index e2a2b203eb005b5403f2b68f92a841d87137b75c..71dedcae55a69c4ea93c5c71c7edfd93583864a9 100644 (file)
@@ -76,4 +76,6 @@
 
 #define SO_BUSY_POLL           46
 
+#define SO_MAX_PACING_RATE     47
+
 #endif /* _ASM_SOCKET_H */
index eb59bfe23e8510c7c812d43496860bcee37974b7..93c9980e1b6b61a93c81dc93a4c0902ee59a451c 100644 (file)
  * the Free Software Foundation; either version 2 of the License, or
  * (at your option) any later version.
  */
-
-#include <linux/of.h>  /* linux/of.h gets to determine #include ordering */
-
 #ifndef _ASM_OPENRISC_PROM_H
 #define _ASM_OPENRISC_PROM_H
-#ifdef __KERNEL__
-#ifndef __ASSEMBLY__
 
-#include <linux/types.h>
-#include <asm/irq.h>
-#include <linux/irqdomain.h>
-#include <linux/atomic.h>
-#include <linux/of_irq.h>
-#include <linux/of_fdt.h>
-#include <linux/of_address.h>
-#include <linux/proc_fs.h>
-#include <linux/platform_device.h>
 #define HAVE_ARCH_DEVTREE_FIXUPS
 
-/* Other Prototypes */
-extern int early_uartlite_console(void);
-
-/* Parse the ibm,dma-window property of an OF node into the busno, phys and
- * size parameters.
- */
-void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop,
-               unsigned long *busno, unsigned long *phys, unsigned long *size);
-
-extern void kdump_move_device_tree(void);
-
-/* Get the MAC address */
-extern const void *of_get_mac_address(struct device_node *np);
-
-/**
- * of_irq_map_pci - Resolve the interrupt for a PCI device
- * @pdev:      the device whose interrupt is to be resolved
- * @out_irq:   structure of_irq filled by this function
- *
- * This function resolves the PCI interrupt for a given PCI device. If a
- * device-node exists for a given pci_dev, it will use normal OF tree
- * walking. If not, it will implement standard swizzling and walk up the
- * PCI tree until an device-node is found, at which point it will finish
- * resolving using the OF tree walking.
- */
-struct pci_dev;
-extern int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq);
-
-#endif /* __ASSEMBLY__ */
-#endif /* __KERNEL__ */
 #endif /* _ASM_OPENRISC_PROM_H */
index 71700e636a8e7b25172cda7f4815b1e46216b3d3..7c614d01f1fa42df36a23b3633f6d6ae7d47aa6a 100644 (file)
@@ -75,6 +75,8 @@
 
 #define SO_BUSY_POLL           0x4027
 
+#define SO_MAX_PACING_RATE     0x4048
+
 /* O_NONBLOCK clashes with the bits used for socket types.  Therefore we
  * have to define SOCK_NONBLOCK to a different value here.
  */
index d10d27a720c0d1f323c2248f01cc93193e73ca1e..00c0ed333a3d53421a161369cf1fa621c8a02d7f 100644 (file)
@@ -182,6 +182,9 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
 
        if (user_mode(regs))
                flags |= FAULT_FLAG_USER;
+
+       acc_type = parisc_acctyp(code, regs->iir);
+
        if (acc_type & VM_WRITE)
                flags |= FAULT_FLAG_WRITE;
 retry:
@@ -196,8 +199,6 @@ retry:
 
 good_area:
 
-       acc_type = parisc_acctyp(code,regs->iir);
-
        if ((vma->vm_flags & acc_type) != acc_type)
                goto bad_area;
 
index 6a15c968d21453230ab469b5921fea28b790dcc5..15ca2255f43853945789c1fdc58f4a5bc5a82dbf 100644 (file)
@@ -74,7 +74,7 @@ src-wlib-$(CONFIG_8xx) += mpc8xx.c planetcore.c
 src-wlib-$(CONFIG_PPC_82xx) += pq2.c fsl-soc.c planetcore.c
 src-wlib-$(CONFIG_EMBEDDED6xx) += mv64x60.c mv64x60_i2c.c ugecon.c
 
-src-plat-y := of.c
+src-plat-y := of.c epapr.c
 src-plat-$(CONFIG_40x) += fixed-head.S ep405.c cuboot-hotfoot.c \
                                treeboot-walnut.c cuboot-acadia.c \
                                cuboot-kilauea.c simpleboot.c \
@@ -97,7 +97,7 @@ src-plat-$(CONFIG_EMBEDDED6xx) += cuboot-pq2.c cuboot-mpc7448hpc2.c \
                                        prpmc2800.c
 src-plat-$(CONFIG_AMIGAONE) += cuboot-amigaone.c
 src-plat-$(CONFIG_PPC_PS3) += ps3-head.S ps3-hvcall.S ps3.c
-src-plat-$(CONFIG_EPAPR_BOOT) += epapr.c
+src-plat-$(CONFIG_EPAPR_BOOT) += epapr.c epapr-wrapper.c
 
 src-wlib := $(sort $(src-wlib-y))
 src-plat := $(sort $(src-plat-y))
diff --git a/arch/powerpc/boot/epapr-wrapper.c b/arch/powerpc/boot/epapr-wrapper.c
new file mode 100644 (file)
index 0000000..c101910
--- /dev/null
@@ -0,0 +1,9 @@
+extern void epapr_platform_init(unsigned long r3, unsigned long r4,
+                               unsigned long r5, unsigned long r6,
+                               unsigned long r7);
+
+void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+                  unsigned long r6, unsigned long r7)
+{
+       epapr_platform_init(r3, r4, r5, r6, r7);
+}
index 06c1961bd124a06966da1c5c6a85a6d4452e951a..02e91aa2194a57a66766852444835607143fa29a 100644 (file)
@@ -48,8 +48,8 @@ static void platform_fixups(void)
                       fdt_addr, fdt_totalsize((void *)fdt_addr), ima_size);
 }
 
-void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
-                  unsigned long r6, unsigned long r7)
+void epapr_platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+                        unsigned long r6, unsigned long r7)
 {
        epapr_magic = r6;
        ima_size = r7;
index 61d9899aa0d09d371c99ec70b7959693f8bfab36..62e2f43ec1df1144d3a790e1f3cf3e3263fdca47 100644 (file)
@@ -26,6 +26,9 @@
 
 static unsigned long claim_base;
 
+void epapr_platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+                        unsigned long r6, unsigned long r7);
+
 static void *of_try_claim(unsigned long size)
 {
        unsigned long addr = 0;
@@ -61,7 +64,7 @@ static void of_image_hdr(const void *hdr)
        }
 }
 
-void platform_init(unsigned long a1, unsigned long a2, void *promptr)
+static void of_platform_init(unsigned long a1, unsigned long a2, void *promptr)
 {
        platform_ops.image_hdr = of_image_hdr;
        platform_ops.malloc = of_try_claim;
@@ -81,3 +84,14 @@ void platform_init(unsigned long a1, unsigned long a2, void *promptr)
                loader_info.initrd_size = a2;
        }
 }
+
+void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+                  unsigned long r6, unsigned long r7)
+{
+       /* Detect OF vs. ePAPR boot */
+       if (r5)
+               of_platform_init(r3, r4, (void *)r5);
+       else
+               epapr_platform_init(r3, r4, r5, r6, r7);
+}
+
index 6761c746048df389812888b8430aca4f442e191c..cd7af841ba051f8725e8ab33dffae1a23201fc60 100755 (executable)
@@ -148,18 +148,18 @@ make_space=y
 
 case "$platform" in
 pseries)
-    platformo=$object/of.o
+    platformo="$object/of.o $object/epapr.o"
     link_address='0x4000000'
     ;;
 maple)
-    platformo=$object/of.o
+    platformo="$object/of.o $object/epapr.o"
     link_address='0x400000'
     ;;
 pmac|chrp)
-    platformo=$object/of.o
+    platformo="$object/of.o $object/epapr.o"
     ;;
 coff)
-    platformo="$object/crt0.o $object/of.o"
+    platformo="$object/crt0.o $object/of.o $object/epapr.o"
     lds=$object/zImage.coff.lds
     link_address='0x500000'
     pie=
@@ -253,6 +253,7 @@ treeboot-iss4xx-mpic)
     platformo="$object/treeboot-iss4xx.o"
     ;;
 epapr)
+    platformo="$object/epapr.o $object/epapr-wrapper.o"
     link_address='0x20000000'
     pie=-pie
     ;;
index 0e40843a1c6ed58273c1a0e2eb22841e9007e977..41f13cec8a8fcd01bbd2e02f3cfc50083b3addba 100644 (file)
@@ -69,9 +69,9 @@ extern struct thread_info *softirq_ctx[NR_CPUS];
 
 extern void irq_ctx_init(void);
 extern void call_do_softirq(struct thread_info *tp);
-extern int call_handle_irq(int irq, void *p1,
-                          struct thread_info *tp, void *func);
+extern void call_do_irq(struct pt_regs *regs, struct thread_info *tp);
 extern void do_IRQ(struct pt_regs *regs);
+extern void __do_irq(struct pt_regs *regs);
 
 int irq_choose_cpu(const struct cpumask *mask);
 
index e378cccfca55bb20db094f12a2009ad3ba7bf1a4..ce4de5aed7b5c302b292bd38c69f21bd2f059038 100644 (file)
@@ -149,8 +149,6 @@ typedef struct {
 
 struct thread_struct {
        unsigned long   ksp;            /* Kernel stack pointer */
-       unsigned long   ksp_limit;      /* if ksp <= ksp_limit stack overflow */
-
 #ifdef CONFIG_PPC64
        unsigned long   ksp_vsid;
 #endif
@@ -162,6 +160,7 @@ struct thread_struct {
 #endif
 #ifdef CONFIG_PPC32
        void            *pgdir;         /* root of page-table tree */
+       unsigned long   ksp_limit;      /* if ksp <= ksp_limit stack overflow */
 #endif
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
        /*
@@ -321,7 +320,6 @@ struct thread_struct {
 #else
 #define INIT_THREAD  { \
        .ksp = INIT_SP, \
-       .ksp_limit = INIT_SP_LIMIT, \
        .regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \
        .fs = KERNEL_DS, \
        .fpr = {{0}}, \
index a6d74467c9edb476ad1175e34670848f946528c8..fa698324a1fd13e50ec0d5e01d526b5a00a2b0b8 100644 (file)
@@ -83,4 +83,6 @@
 
 #define SO_BUSY_POLL           46
 
+#define SO_MAX_PACING_RATE     47
+
 #endif /* _ASM_POWERPC_SOCKET_H */
index d8958be5f31a18b7c0bae16509749067aae75265..502c7a4e73f70dc1008754b7f55e5ae164f25a76 100644 (file)
@@ -80,10 +80,11 @@ int main(void)
        DEFINE(TASKTHREADPPR, offsetof(struct task_struct, thread.ppr));
 #else
        DEFINE(THREAD_INFO, offsetof(struct task_struct, stack));
+       DEFINE(THREAD_INFO_GAP, _ALIGN_UP(sizeof(struct thread_info), 16));
+       DEFINE(KSP_LIMIT, offsetof(struct thread_struct, ksp_limit));
 #endif /* CONFIG_PPC64 */
 
        DEFINE(KSP, offsetof(struct thread_struct, ksp));
-       DEFINE(KSP_LIMIT, offsetof(struct thread_struct, ksp_limit));
        DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
 #ifdef CONFIG_BOOKE
        DEFINE(THREAD_NORMSAVES, offsetof(struct thread_struct, normsave[0]));
index c69440cef7af43413987f3cd4b2c936cd0e8f788..57d286a78f86f6ff1231c695b9a10a591af30710 100644 (file)
@@ -441,50 +441,6 @@ void migrate_irqs(void)
 }
 #endif
 
-static inline void handle_one_irq(unsigned int irq)
-{
-       struct thread_info *curtp, *irqtp;
-       unsigned long saved_sp_limit;
-       struct irq_desc *desc;
-
-       desc = irq_to_desc(irq);
-       if (!desc)
-               return;
-
-       /* Switch to the irq stack to handle this */
-       curtp = current_thread_info();
-       irqtp = hardirq_ctx[smp_processor_id()];
-
-       if (curtp == irqtp) {
-               /* We're already on the irq stack, just handle it */
-               desc->handle_irq(irq, desc);
-               return;
-       }
-
-       saved_sp_limit = current->thread.ksp_limit;
-
-       irqtp->task = curtp->task;
-       irqtp->flags = 0;
-
-       /* Copy the softirq bits in preempt_count so that the
-        * softirq checks work in the hardirq context. */
-       irqtp->preempt_count = (irqtp->preempt_count & ~SOFTIRQ_MASK) |
-                              (curtp->preempt_count & SOFTIRQ_MASK);
-
-       current->thread.ksp_limit = (unsigned long)irqtp +
-               _ALIGN_UP(sizeof(struct thread_info), 16);
-
-       call_handle_irq(irq, desc, irqtp, desc->handle_irq);
-       current->thread.ksp_limit = saved_sp_limit;
-       irqtp->task = NULL;
-
-       /* Set any flag that may have been set on the
-        * alternate stack
-        */
-       if (irqtp->flags)
-               set_bits(irqtp->flags, &curtp->flags);
-}
-
 static inline void check_stack_overflow(void)
 {
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
@@ -501,9 +457,9 @@ static inline void check_stack_overflow(void)
 #endif
 }
 
-void do_IRQ(struct pt_regs *regs)
+void __do_irq(struct pt_regs *regs)
 {
-       struct pt_regs *old_regs = set_irq_regs(regs);
+       struct irq_desc *desc;
        unsigned int irq;
 
        irq_enter();
@@ -519,18 +475,56 @@ void do_IRQ(struct pt_regs *regs)
         */
        irq = ppc_md.get_irq();
 
-       /* We can hard enable interrupts now */
+       /* We can hard enable interrupts now to allow perf interrupts */
        may_hard_irq_enable();
 
        /* And finally process it */
-       if (irq != NO_IRQ)
-               handle_one_irq(irq);
-       else
+       if (unlikely(irq == NO_IRQ))
                __get_cpu_var(irq_stat).spurious_irqs++;
+       else {
+               desc = irq_to_desc(irq);
+               if (likely(desc))
+                       desc->handle_irq(irq, desc);
+       }
 
        trace_irq_exit(regs);
 
        irq_exit();
+}
+
+void do_IRQ(struct pt_regs *regs)
+{
+       struct pt_regs *old_regs = set_irq_regs(regs);
+       struct thread_info *curtp, *irqtp;
+
+       /* Switch to the irq stack to handle this */
+       curtp = current_thread_info();
+       irqtp = hardirq_ctx[raw_smp_processor_id()];
+
+       /* Already there ? */
+       if (unlikely(curtp == irqtp)) {
+               __do_irq(regs);
+               set_irq_regs(old_regs);
+               return;
+       }
+
+       /* Prepare the thread_info in the irq stack */
+       irqtp->task = curtp->task;
+       irqtp->flags = 0;
+
+       /* Copy the preempt_count so that the [soft]irq checks work. */
+       irqtp->preempt_count = curtp->preempt_count;
+
+       /* Switch stack and call */
+       call_do_irq(regs, irqtp);
+
+       /* Restore stack limit */
+       irqtp->task = NULL;
+
+       /* Copy back updates to the thread_info */
+       if (irqtp->flags)
+               set_bits(irqtp->flags, &curtp->flags);
+
        set_irq_regs(old_regs);
 }
 
@@ -592,28 +586,22 @@ void irq_ctx_init(void)
                memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
                tp = softirq_ctx[i];
                tp->cpu = i;
-               tp->preempt_count = 0;
 
                memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
                tp = hardirq_ctx[i];
                tp->cpu = i;
-               tp->preempt_count = HARDIRQ_OFFSET;
        }
 }
 
 static inline void do_softirq_onstack(void)
 {
        struct thread_info *curtp, *irqtp;
-       unsigned long saved_sp_limit = current->thread.ksp_limit;
 
        curtp = current_thread_info();
        irqtp = softirq_ctx[smp_processor_id()];
        irqtp->task = curtp->task;
        irqtp->flags = 0;
-       current->thread.ksp_limit = (unsigned long)irqtp +
-                                   _ALIGN_UP(sizeof(struct thread_info), 16);
        call_do_softirq(irqtp);
-       current->thread.ksp_limit = saved_sp_limit;
        irqtp->task = NULL;
 
        /* Set any flag that may have been set on the
index 777d999f563bb377bff3217358aacdc7667f1fd4..2b0ad984536333d7a15a3b39e6c9234c11b4b669 100644 (file)
 
        .text
 
+/*
+ * We store the saved ksp_limit in the unused part
+ * of the STACK_FRAME_OVERHEAD
+ */
 _GLOBAL(call_do_softirq)
        mflr    r0
        stw     r0,4(r1)
+       lwz     r10,THREAD+KSP_LIMIT(r2)
+       addi    r11,r3,THREAD_INFO_GAP
        stwu    r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
        mr      r1,r3
+       stw     r10,8(r1)
+       stw     r11,THREAD+KSP_LIMIT(r2)
        bl      __do_softirq
+       lwz     r10,8(r1)
        lwz     r1,0(r1)
        lwz     r0,4(r1)
+       stw     r10,THREAD+KSP_LIMIT(r2)
        mtlr    r0
        blr
 
-_GLOBAL(call_handle_irq)
+_GLOBAL(call_do_irq)
        mflr    r0
        stw     r0,4(r1)
-       mtctr   r6
-       stwu    r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r5)
-       mr      r1,r5
-       bctrl
+       lwz     r10,THREAD+KSP_LIMIT(r2)
+       addi    r11,r3,THREAD_INFO_GAP
+       stwu    r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
+       mr      r1,r4
+       stw     r10,8(r1)
+       stw     r11,THREAD+KSP_LIMIT(r2)
+       bl      __do_irq
+       lwz     r10,8(r1)
        lwz     r1,0(r1)
        lwz     r0,4(r1)
+       stw     r10,THREAD+KSP_LIMIT(r2)
        mtlr    r0
        blr
 
index 971d7e78aff20e1ca801dd923dfc77337e834ad5..e59caf874d05ed60e500579b06bfcdf38bd85125 100644 (file)
@@ -40,14 +40,12 @@ _GLOBAL(call_do_softirq)
        mtlr    r0
        blr
 
-_GLOBAL(call_handle_irq)
-       ld      r8,0(r6)
+_GLOBAL(call_do_irq)
        mflr    r0
        std     r0,16(r1)
-       mtctr   r8
-       stdu    r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r5)
-       mr      r1,r5
-       bctrl
+       stdu    r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
+       mr      r1,r4
+       bl      .__do_irq
        ld      r1,0(r1)
        ld      r0,16(r1)
        mtlr    r0
index 6f428da53e2085b877334270286069e2ba54da37..96d2fdf3aa9ebe3bba547fd567c5a232be20ec9a 100644 (file)
@@ -1000,9 +1000,10 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
        kregs = (struct pt_regs *) sp;
        sp -= STACK_FRAME_OVERHEAD;
        p->thread.ksp = sp;
+#ifdef CONFIG_PPC32
        p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
                                _ALIGN_UP(sizeof(struct thread_info), 16);
-
+#endif
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
        p->thread.ptrace_bps[0] = NULL;
 #endif
index 12e656ffe60ea86a00a531578efd55ef98e3faef..5fe2842e8bab7cc4013c987c74791c00005c51ee 100644 (file)
@@ -196,6 +196,8 @@ static int __initdata mem_reserve_cnt;
 
 static cell_t __initdata regbuf[1024];
 
+static bool rtas_has_query_cpu_stopped;
+
 
 /*
  * Error results ... some OF calls will return "-1" on error, some
@@ -1574,6 +1576,11 @@ static void __init prom_instantiate_rtas(void)
        prom_setprop(rtas_node, "/rtas", "linux,rtas-entry",
                     &val, sizeof(val));
 
+       /* Check if it supports "query-cpu-stopped-state" */
+       if (prom_getprop(rtas_node, "query-cpu-stopped-state",
+                        &val, sizeof(val)) != PROM_ERROR)
+               rtas_has_query_cpu_stopped = true;
+
 #if defined(CONFIG_PPC_POWERNV) && defined(__BIG_ENDIAN__)
        /* PowerVN takeover hack */
        prom_rtas_data = base;
@@ -1815,6 +1822,18 @@ static void __init prom_hold_cpus(void)
                = (void *) LOW_ADDR(__secondary_hold_acknowledge);
        unsigned long secondary_hold = LOW_ADDR(__secondary_hold);
 
+       /*
+        * On pseries, if RTAS supports "query-cpu-stopped-state",
+        * we skip this stage, the CPUs will be started by the
+        * kernel using RTAS.
+        */
+       if ((of_platform == PLATFORM_PSERIES ||
+            of_platform == PLATFORM_PSERIES_LPAR) &&
+           rtas_has_query_cpu_stopped) {
+               prom_printf("prom_hold_cpus: skipped\n");
+               return;
+       }
+
        prom_debug("prom_hold_cpus: start...\n");
        prom_debug("    1) spinloop       = 0x%x\n", (unsigned long)spinloop);
        prom_debug("    1) *spinloop      = 0x%x\n", *spinloop);
@@ -3011,6 +3030,8 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
         * On non-powermacs, put all CPUs in spin-loops.
         *
         * PowerMacs use a different mechanism to spin CPUs
+        *
+        * (This must be done after instantiating RTAS)
         */
        if (of_platform != PLATFORM_POWERMAC &&
            of_platform != PLATFORM_OPAL)
index a7ee978fb860c9ffd237d9fbafb716b724e3afec..b1faa1593c9067e68995e1cdc54dbb8465dce587 100644 (file)
@@ -1505,6 +1505,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
                 */
                if ((ra == 1) && !(regs->msr & MSR_PR) \
                        && (val3 >= (regs->gpr[1] - STACK_INT_FRAME_SIZE))) {
+#ifdef CONFIG_PPC32
                        /*
                         * Check if we will touch kernel stack overflow
                         */
@@ -1513,7 +1514,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
                                err = -EINVAL;
                                break;
                        }
-
+#endif /* CONFIG_PPC32 */
                        /*
                         * Check if we already set since that means we'll
                         * lose the previous value.
index 1c1771a402501d00328c450cfaeb5942f360d3f4..24f58cb0a543ece1cb92242b36ec8e18c145f5be 100644 (file)
@@ -233,18 +233,24 @@ static void __init smp_init_pseries(void)
 
        alloc_bootmem_cpumask_var(&of_spin_mask);
 
-       /* Mark threads which are still spinning in hold loops. */
-       if (cpu_has_feature(CPU_FTR_SMT)) {
-               for_each_present_cpu(i) { 
-                       if (cpu_thread_in_core(i) == 0)
-                               cpumask_set_cpu(i, of_spin_mask);
-               }
-       } else {
-               cpumask_copy(of_spin_mask, cpu_present_mask);
+       /*
+        * Mark threads which are still spinning in hold loops
+        *
+        * We know prom_init will not have started them if RTAS supports
+        * query-cpu-stopped-state.
+        */
+       if (rtas_token("query-cpu-stopped-state") == RTAS_UNKNOWN_SERVICE) {
+               if (cpu_has_feature(CPU_FTR_SMT)) {
+                       for_each_present_cpu(i) {
+                               if (cpu_thread_in_core(i) == 0)
+                                       cpumask_set_cpu(i, of_spin_mask);
+                       }
+               } else
+                       cpumask_copy(of_spin_mask, cpu_present_mask);
+
+               cpumask_clear_cpu(boot_cpuid, of_spin_mask);
        }
 
-       cpumask_clear_cpu(boot_cpuid, of_spin_mask);
-
        /* Non-lpar has additional take/give timebase */
        if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) {
                smp_ops->give_timebase = rtas_give_timebase;
index dcc6ac2d802637ab6e5fb9c004eaa69ba5e01479..7143793859fadf0cbc66c1b2119c5cbaee6368c8 100644 (file)
@@ -93,6 +93,7 @@ config S390
        select ARCH_INLINE_WRITE_UNLOCK_IRQ
        select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
        select ARCH_SAVE_PAGE_KEYS if HIBERNATION
+       select ARCH_USE_CMPXCHG_LOCKREF
        select ARCH_WANT_IPC_PARSE_VERSION
        select BUILDTIME_EXTABLE_SORT
        select CLONE_BACKWARDS2
@@ -102,7 +103,6 @@ config S390
        select GENERIC_TIME_VSYSCALL_OLD
        select HAVE_ALIGNED_STRUCT_PAGE if SLUB
        select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
-       select HAVE_ARCH_MUTEX_CPU_RELAX
        select HAVE_ARCH_SECCOMP_FILTER
        select HAVE_ARCH_TRACEHOOK
        select HAVE_ARCH_TRANSPARENT_HUGEPAGE if 64BIT
index 688271f5f2e452b9951599550f33ed0ddcfe0a7c..458c1f7fbc1808d48982aa0c5fe89bfe3df2098c 100644 (file)
@@ -7,5 +7,3 @@
  */
 
 #include <asm-generic/mutex-dec.h>
-
-#define arch_mutex_cpu_relax() barrier()
index 0eb37505cab11c71f083ed508f02c98a72127e95..ca7821f07260301f26c2ff9314432b193ad8bebf 100644 (file)
@@ -198,6 +198,8 @@ static inline void cpu_relax(void)
        barrier();
 }
 
+#define arch_mutex_cpu_relax()  barrier()
+
 static inline void psw_set_key(unsigned int key)
 {
        asm volatile("spka 0(%0)" : : "d" (key));
index 701fe8c59e1f0e9efc302e711eef5bcfe3db479a..83e5d216105e86a2df2ab1367bcfc3fd1590542a 100644 (file)
@@ -44,6 +44,11 @@ extern void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
 extern int arch_spin_trylock_retry(arch_spinlock_t *);
 extern void arch_spin_relax(arch_spinlock_t *lock);
 
+static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+       return lock.owner_cpu == 0;
+}
+
 static inline void arch_spin_lock(arch_spinlock_t *lp)
 {
        int old;
index 92494494692eca965ee1d315d42a800e08c47605..c286c2e868f03f9683481b82689af19f1a222e2d 100644 (file)
@@ -82,4 +82,6 @@
 
 #define SO_BUSY_POLL           46
 
+#define SO_MAX_PACING_RATE     47
+
 #endif /* _ASM_SOCKET_H */
index a1be70db75fec95d9cfa087008edf86a6baf7280..305f7ee1f382e60739464c6d0f3de98855d638ea 100644 (file)
@@ -2,6 +2,7 @@ menu "Machine selection"
 
 config SCORE
        def_bool y
+       select HAVE_GENERIC_HARDIRQS
        select GENERIC_IRQ_SHOW
        select GENERIC_IOMAP
        select GENERIC_ATOMIC64
@@ -110,3 +111,6 @@ source "security/Kconfig"
 source "crypto/Kconfig"
 
 source "lib/Kconfig"
+
+config NO_IOMEM
+       def_bool y
index 974aefe861233b8615ed61253a6f7a1d4b21085c..9e3e060290e0895529c864d7e6892e0c47708f43 100644 (file)
@@ -20,8 +20,8 @@ cflags-y += -G0 -pipe -mel -mnhwloop -D__SCOREEL__ \
 #
 KBUILD_AFLAGS += $(cflags-y)
 KBUILD_CFLAGS += $(cflags-y)
-KBUILD_AFLAGS_MODULE += -mlong-calls
-KBUILD_CFLAGS_MODULE += -mlong-calls
+KBUILD_AFLAGS_MODULE +=
+KBUILD_CFLAGS_MODULE +=
 LDFLAGS += --oformat elf32-littlescore
 LDFLAGS_vmlinux        += -G0 -static -nostdlib
 
index f909ac3144a45a895751d038fbfea7495be59898..961bd64015a817b50452499d992ce23f14291247 100644 (file)
@@ -184,48 +184,57 @@ static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
                                __wsum sum)
 {
        __asm__ __volatile__(
-               ".set\tnoreorder\t\t\t# csum_ipv6_magic\n\t"
-               ".set\tnoat\n\t"
-               "addu\t%0, %5\t\t\t# proto (long in network byte order)\n\t"
-               "sltu\t$1, %0, %5\n\t"
-               "addu\t%0, $1\n\t"
-               "addu\t%0, %6\t\t\t# csum\n\t"
-               "sltu\t$1, %0, %6\n\t"
-               "lw\t%1, 0(%2)\t\t\t# four words source address\n\t"
-               "addu\t%0, $1\n\t"
-               "addu\t%0, %1\n\t"
-               "sltu\t$1, %0, %1\n\t"
-               "lw\t%1, 4(%2)\n\t"
-               "addu\t%0, $1\n\t"
-               "addu\t%0, %1\n\t"
-               "sltu\t$1, %0, %1\n\t"
-               "lw\t%1, 8(%2)\n\t"
-               "addu\t%0, $1\n\t"
-               "addu\t%0, %1\n\t"
-               "sltu\t$1, %0, %1\n\t"
-               "lw\t%1, 12(%2)\n\t"
-               "addu\t%0, $1\n\t"
-               "addu\t%0, %1\n\t"
-               "sltu\t$1, %0, %1\n\t"
-               "lw\t%1, 0(%3)\n\t"
-               "addu\t%0, $1\n\t"
-               "addu\t%0, %1\n\t"
-               "sltu\t$1, %0, %1\n\t"
-               "lw\t%1, 4(%3)\n\t"
-               "addu\t%0, $1\n\t"
-               "addu\t%0, %1\n\t"
-               "sltu\t$1, %0, %1\n\t"
-               "lw\t%1, 8(%3)\n\t"
-               "addu\t%0, $1\n\t"
-               "addu\t%0, %1\n\t"
-               "sltu\t$1, %0, %1\n\t"
-               "lw\t%1, 12(%3)\n\t"
-               "addu\t%0, $1\n\t"
-               "addu\t%0, %1\n\t"
-               "sltu\t$1, %0, %1\n\t"
-               "addu\t%0, $1\t\t\t# Add final carry\n\t"
-               ".set\tnoat\n\t"
-               ".set\tnoreorder"
+               ".set\tvolatile\t\t\t# csum_ipv6_magic\n\t"
+               "add\t%0, %0, %5\t\t\t# proto (long in network byte order)\n\t"
+               "cmp.c\t%5, %0\n\t"
+               "bleu 1f\n\t"
+               "addi\t%0, 0x1\n\t"
+               "1:add\t%0, %0, %6\t\t\t# csum\n\t"
+               "cmp.c\t%6, %0\n\t"
+               "lw\t%1, [%2, 0]\t\t\t# four words source address\n\t"
+               "bleu 1f\n\t"
+               "addi\t%0, 0x1\n\t"
+               "1:add\t%0, %0, %1\n\t"
+               "cmp.c\t%1, %0\n\t"
+               "1:lw\t%1, [%2, 4]\n\t"
+               "bleu 1f\n\t"
+               "addi\t%0, 0x1\n\t"
+               "1:add\t%0, %0, %1\n\t"
+               "cmp.c\t%1, %0\n\t"
+               "lw\t%1, [%2,8]\n\t"
+               "bleu 1f\n\t"
+               "addi\t%0, 0x1\n\t"
+               "1:add\t%0, %0, %1\n\t"
+               "cmp.c\t%1, %0\n\t"
+               "lw\t%1, [%2, 12]\n\t"
+               "bleu 1f\n\t"
+               "addi\t%0, 0x1\n\t"
+               "1:add\t%0, %0,%1\n\t"
+               "cmp.c\t%1, %0\n\t"
+               "lw\t%1, [%3, 0]\n\t"
+               "bleu 1f\n\t"
+               "addi\t%0, 0x1\n\t"
+               "1:add\t%0, %0, %1\n\t"
+               "cmp.c\t%1, %0\n\t"
+               "lw\t%1, [%3, 4]\n\t"
+               "bleu 1f\n\t"
+               "addi\t%0, 0x1\n\t"
+               "1:add\t%0, %0, %1\n\t"
+               "cmp.c\t%1, %0\n\t"
+               "lw\t%1, [%3, 8]\n\t"
+               "bleu 1f\n\t"
+               "addi\t%0, 0x1\n\t"
+               "1:add\t%0, %0, %1\n\t"
+               "cmp.c\t%1, %0\n\t"
+               "lw\t%1, [%3, 12]\n\t"
+               "bleu 1f\n\t"
+               "addi\t%0, 0x1\n\t"
+               "1:add\t%0, %0, %1\n\t"
+               "cmp.c\t%1, %0\n\t"
+               "bleu 1f\n\t"
+               "addi\t%0, 0x1\n\t"
+               "1:\n\t"
+               ".set\toptimize"
                : "=r" (sum), "=r" (proto)
                : "r" (saddr), "r" (daddr),
                  "0" (htonl(len)), "1" (htonl(proto)), "r" (sum));
index fbbfd7132e3b30b6d338fec1f9ea8f7572a76d7d..574c8827abe2381f2db65dfaf3a8f889cf52987b 100644 (file)
@@ -5,5 +5,4 @@
 
 #define virt_to_bus    virt_to_phys
 #define bus_to_virt    phys_to_virt
-
 #endif /* _ASM_SCORE_IO_H */
index 059a61b7071b2ab4d8e5ada5e9d1aa732b435206..716b3fd1d86397ea57389a106516eb12073dde43 100644 (file)
@@ -2,7 +2,7 @@
 #define _ASM_SCORE_PGALLOC_H
 
 #include <linux/mm.h>
-
+#include <linux/highmem.h>
 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
        pte_t *pte)
 {
index 7234ed09b7b7ef5e815b749d8f364921a04b6500..befb87d30a89a0d9d21a774fb8d542963224a6ff 100644 (file)
@@ -264,7 +264,7 @@ resume_kernel:
        disable_irq
        lw      r8, [r28, TI_PRE_COUNT]
        cmpz.c  r8
-       bne     r8, restore_all
+       bne     restore_all
 need_resched:
        lw      r8, [r28, TI_FLAGS]
        andri.c r9, r8, _TIF_NEED_RESCHED
@@ -415,7 +415,7 @@ ENTRY(handle_sys)
        sw      r9, [r0, PT_EPC]
 
        cmpi.c  r27, __NR_syscalls      # check syscall number
-       bgeu    illegal_syscall
+       bcs     illegal_syscall
 
        slli    r8, r27, 2              # get syscall routine
        la      r11, sys_call_table
index f4c6d02421d3168cd8b17e87906490b214e7de96..a1519ad3d49d68e0e3202ecadb5b6e1b027a7df3 100644 (file)
@@ -78,8 +78,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
        p->thread.reg0 = (unsigned long) childregs;
        if (unlikely(p->flags & PF_KTHREAD)) {
                memset(childregs, 0, sizeof(struct pt_regs));
-               p->thread->reg12 = usp;
-               p->thread->reg13 = arg;
+               p->thread.reg12 = usp;
+               p->thread.reg13 = arg;
                p->thread.reg3 = (unsigned long) ret_from_kernel_thread;
        } else {
                *childregs = *current_pt_regs();
index 4e1d66c3ce71b0b89bc2acb86643b3cebd445a06..0f21e9a5ca18879f5d919aa0fa05532204244eba 100644 (file)
@@ -72,6 +72,8 @@
 
 #define SO_BUSY_POLL           0x0030
 
+#define SO_MAX_PACING_RATE     0x0031
+
 /* Security levels - as per NRL IPv6 - don't actually do anything */
 #define SO_SECURITY_AUTHENTICATION             0x5001
 #define SO_SECURITY_ENCRYPTION_TRANSPORT       0x5002
index 62d6b153ffa2e82d895a4e81b51b2abe9490aee8..4d9ac8406f323a96e895d7bf482eaa18a97974c6 100644 (file)
@@ -851,7 +851,7 @@ void ldom_reboot(const char *boot_command)
 
                strcpy(full_boot_str, "boot ");
                strlcpy(full_boot_str + strlen("boot "), boot_command,
-                       sizeof(full_boot_str + strlen("boot ")));
+                       sizeof(full_boot_str));
                len = strlen(full_boot_str);
 
                if (reboot_data_supported) {
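
For context on the one-line fix above: sizeof(full_boot_str + strlen("boot "))
is the size of a pointer (8 bytes on sparc64), so at most seven characters of
the boot command plus the terminator were ever copied; passing
sizeof(full_boot_str) bounds the copy by the size of the underlying array
instead.
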
index 6aef9fbc09b7a48ec82c13e04b1f8c761dc34661..b913915e8e631f9c9ec2996fde3862bd33e2edce 100644 (file)
@@ -79,30 +79,38 @@ static inline int phys_to_machine_mapping_valid(unsigned long pfn)
        return get_phys_to_machine(pfn) != INVALID_P2M_ENTRY;
 }
 
-static inline unsigned long mfn_to_pfn(unsigned long mfn)
+static inline unsigned long mfn_to_pfn_no_overrides(unsigned long mfn)
 {
        unsigned long pfn;
-       int ret = 0;
+       int ret;
 
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return mfn;
 
-       if (unlikely(mfn >= machine_to_phys_nr)) {
-               pfn = ~0;
-               goto try_override;
-       }
-       pfn = 0;
+       if (unlikely(mfn >= machine_to_phys_nr))
+               return ~0;
+
        /*
         * The array access can fail (e.g., device space beyond end of RAM).
         * In such cases it doesn't matter what we return (we return garbage),
         * but we must handle the fault without crashing!
         */
        ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
-try_override:
-       /* ret might be < 0 if there are no entries in the m2p for mfn */
        if (ret < 0)
-               pfn = ~0;
-       else if (get_phys_to_machine(pfn) != mfn)
+               return ~0;
+
+       return pfn;
+}
+
+static inline unsigned long mfn_to_pfn(unsigned long mfn)
+{
+       unsigned long pfn;
+
+       if (xen_feature(XENFEAT_auto_translated_physmap))
+               return mfn;
+
+       pfn = mfn_to_pfn_no_overrides(mfn);
+       if (get_phys_to_machine(pfn) != mfn) {
                /*
                 * If this appears to be a foreign mfn (because the pfn
                 * doesn't map back to the mfn), then check the local override
@@ -111,6 +119,7 @@ try_override:
                 * m2p_find_override_pfn returns ~0 if it doesn't find anything.
                 */
                pfn = m2p_find_override_pfn(mfn, ~0);
+       }
 
        /* 
         * pfn is ~0 if there are no entries in the m2p for mfn or if the
index 8355c84b9729df767945c5f4c8a5f7c5db15fab8..897783b3302a9cd0d0af3efad49c94e45f9f7e9e 100644 (file)
@@ -1506,7 +1506,7 @@ static int __init init_hw_perf_events(void)
                err = amd_pmu_init();
                break;
        default:
-               return 0;
+               err = -ENOTSUPP;
        }
        if (err != 0) {
                pr_cont("no PMU driver, software events only.\n");
@@ -1883,9 +1883,9 @@ static struct pmu pmu = {
 
 void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
 {
-       userpg->cap_usr_time = 0;
-       userpg->cap_usr_time_zero = 0;
-       userpg->cap_usr_rdpmc = x86_pmu.attr_rdpmc;
+       userpg->cap_user_time = 0;
+       userpg->cap_user_time_zero = 0;
+       userpg->cap_user_rdpmc = x86_pmu.attr_rdpmc;
        userpg->pmc_width = x86_pmu.cntval_bits;
 
        if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
@@ -1894,13 +1894,13 @@ void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
        if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
                return;
 
-       userpg->cap_usr_time = 1;
+       userpg->cap_user_time = 1;
        userpg->time_mult = this_cpu_read(cyc2ns);
        userpg->time_shift = CYC2NS_SCALE_FACTOR;
        userpg->time_offset = this_cpu_read(cyc2ns_offset) - now;
 
        if (sched_clock_stable && !check_tsc_disabled()) {
-               userpg->cap_usr_time_zero = 1;
+               userpg->cap_user_time_zero = 1;
                userpg->time_zero = this_cpu_read(cyc2ns_offset);
        }
 }
index 9db76c31b3c311bf1f78633f87bef6853cb3db6b..f31a1655d1ff5bd602239e211b14bdd28d95a79a 100644 (file)
@@ -2325,6 +2325,7 @@ __init int intel_pmu_init(void)
                break;
 
        case 55: /* Atom 22nm "Silvermont" */
+       case 77: /* Avoton "Silvermont" */
                memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
                        sizeof(hw_cache_event_ids));
                memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
index 8ed44589b0e486eda5a45efa74f3f4d3836a730f..4118f9f683151e99402740f1882b205a261a1465 100644 (file)
@@ -2706,14 +2706,14 @@ static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
        box->hrtimer.function = uncore_pmu_hrtimer;
 }
 
-struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int cpu)
+static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int node)
 {
        struct intel_uncore_box *box;
        int i, size;
 
        size = sizeof(*box) + type->num_shared_regs * sizeof(struct intel_uncore_extra_reg);
 
-       box = kzalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
+       box = kzalloc_node(size, GFP_KERNEL, node);
        if (!box)
                return NULL;
 
@@ -3031,7 +3031,7 @@ static int uncore_validate_group(struct intel_uncore_pmu *pmu,
        struct intel_uncore_box *fake_box;
        int ret = -EINVAL, n;
 
-       fake_box = uncore_alloc_box(pmu->type, smp_processor_id());
+       fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);
        if (!fake_box)
                return -ENOMEM;
 
@@ -3294,7 +3294,7 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id
        }
 
        type = pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];
-       box = uncore_alloc_box(type, 0);
+       box = uncore_alloc_box(type, NUMA_NO_NODE);
        if (!box)
                return -ENOMEM;
 
@@ -3499,7 +3499,7 @@ static int uncore_cpu_prepare(int cpu, int phys_id)
                        if (pmu->func_id < 0)
                                pmu->func_id = j;
 
-                       box = uncore_alloc_box(type, cpu);
+                       box = uncore_alloc_box(type, cpu_to_node(cpu));
                        if (!box)
                                return -ENOMEM;
 
index 7123b5df479d872def8ff437fcd407c5c4d5ca50..af99f71aeb7f159a6ba7f1da0596ae2843d67e47 100644 (file)
@@ -216,6 +216,7 @@ int apply_microcode_amd(int cpu)
        /* need to apply patch? */
        if (rev >= mc_amd->hdr.patch_id) {
                c->microcode = rev;
+               uci->cpu_sig.rev = rev;
                return 0;
        }
 
index 563ed91e6faa3a2adac62ddeb8caff7e5ad50f24..e643e744e4d8bf855ce1889bdae58b51001aab45 100644 (file)
@@ -352,12 +352,28 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
        },
        {       /* Handle problems with rebooting on the Precision M6600. */
                .callback = set_pci_reboot,
-               .ident = "Dell OptiPlex 990",
+               .ident = "Dell Precision M6600",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
                        DMI_MATCH(DMI_PRODUCT_NAME, "Precision M6600"),
                },
        },
+       {       /* Handle problems with rebooting on the Dell PowerEdge C6100. */
+               .callback = set_pci_reboot,
+               .ident = "Dell PowerEdge C6100",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "C6100"),
+               },
+       },
+       {       /* Some C6100 machines were shipped with vendor being 'Dell'. */
+               .callback = set_pci_reboot,
+               .ident = "Dell PowerEdge C6100",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "C6100"),
+               },
+       },
        { }
 };
 
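The reboot quirk additions above are ordinary dmi_system_id entries; one entry is needed per vendor string variant that has to be recognized. A generic, hedged sketch of how such a table is declared and scanned (the my_* names and ID strings below are placeholders, not taken from the patch):

    #include <linux/kernel.h>
    #include <linux/init.h>
    #include <linux/dmi.h>

    static int my_set_quirk(const struct dmi_system_id *d)
    {
            pr_info("applying reboot quirk for %s\n", d->ident);
            return 0;       /* keep scanning the rest of the table */
    }

    static const struct dmi_system_id my_quirk_table[] = {
            {
                    .callback = my_set_quirk,
                    .ident = "Example Vendor Example Box",
                    .matches = {
                            DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
                            DMI_MATCH(DMI_PRODUCT_NAME, "Example Box"),
                    },
            },
            { }     /* terminating entry */
    };

    static void __init my_apply_quirks(void)
    {
            dmi_check_system(my_quirk_table);
    }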
index a1216de9ffda3b8ed6c38565c66e1e5c7c87c313..3b8e7459dd4db84a4c6f72816cd2c0629d866ee4 100644 (file)
@@ -5345,7 +5345,9 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
         * There are errata that may cause this bit to not be set:
         * AAK134, BY25.
         */
-       if (exit_qualification & INTR_INFO_UNBLOCK_NMI)
+       if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
+                       cpu_has_virtual_nmis() &&
+                       (exit_qualification & INTR_INFO_UNBLOCK_NMI))
                vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI);
 
        gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
index 90f6ed127096566ab06c0a9e6f25a355ccdfc048..c7e22ab29a5a2eb6ce3dd75f0fd3e0b26be2d61b 100644 (file)
@@ -912,10 +912,13 @@ void __init efi_enter_virtual_mode(void)
 
        for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
                md = p;
-               if (!(md->attribute & EFI_MEMORY_RUNTIME) &&
-                   md->type != EFI_BOOT_SERVICES_CODE &&
-                   md->type != EFI_BOOT_SERVICES_DATA)
-                       continue;
+               if (!(md->attribute & EFI_MEMORY_RUNTIME)) {
+#ifdef CONFIG_X86_64
+                       if (md->type != EFI_BOOT_SERVICES_CODE &&
+                           md->type != EFI_BOOT_SERVICES_DATA)
+#endif
+                               continue;
+               }
 
                size = md->num_pages << EFI_PAGE_SHIFT;
                end = md->phys_addr + size;
index 8b901e8d782dadf00091ac88a6fc859c54c4d1de..a61c7d5811beac47e2549cd6fd44118bc5bf7b28 100644 (file)
@@ -879,7 +879,6 @@ int m2p_add_override(unsigned long mfn, struct page *page,
        unsigned long uninitialized_var(address);
        unsigned level;
        pte_t *ptep = NULL;
-       int ret = 0;
 
        pfn = page_to_pfn(page);
        if (!PageHighMem(page)) {
@@ -926,8 +925,8 @@ int m2p_add_override(unsigned long mfn, struct page *page,
         * frontend pages while they are being shared with the backend,
         * because mfn_to_pfn (that ends up being called by GUPF) will
         * return the backend pfn rather than the frontend pfn. */
-       ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
-       if (ret == 0 && get_phys_to_machine(pfn) == mfn)
+       pfn = mfn_to_pfn_no_overrides(mfn);
+       if (get_phys_to_machine(pfn) == mfn)
                set_phys_to_machine(pfn, FOREIGN_FRAME(mfn));
 
        return 0;
@@ -942,7 +941,6 @@ int m2p_remove_override(struct page *page,
        unsigned long uninitialized_var(address);
        unsigned level;
        pte_t *ptep = NULL;
-       int ret = 0;
 
        pfn = page_to_pfn(page);
        mfn = get_phys_to_machine(pfn);
@@ -1029,8 +1027,8 @@ int m2p_remove_override(struct page *page,
         * the original pfn causes mfn_to_pfn(mfn) to return the frontend
         * pfn again. */
        mfn &= ~FOREIGN_FRAME_BIT;
-       ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
-       if (ret == 0 && get_phys_to_machine(pfn) == FOREIGN_FRAME(mfn) &&
+       pfn = mfn_to_pfn_no_overrides(mfn);
+       if (get_phys_to_machine(pfn) == FOREIGN_FRAME(mfn) &&
                        m2p_find_override(mfn) == NULL)
                set_phys_to_machine(pfn, mfn);
 
index 253f63fceea104978c6e5d1f54ba81e1e3581b20..be6b8607895748304c860c61603d24a29bacfa34 100644 (file)
@@ -259,6 +259,14 @@ void xen_uninit_lock_cpu(int cpu)
 }
 
 
+/*
+ * Our init of PV spinlocks is split into two init functions because we use
+ * paravirt patching and jump label patching and have to do all of this
+ * before SMP code is invoked.
+ *
+ * The paravirt patching needs to be done _before_ the alternative asm code
+ * is started, otherwise we would not patch the core kernel code.
+ */
 void __init xen_init_spinlocks(void)
 {
 
@@ -267,12 +275,26 @@ void __init xen_init_spinlocks(void)
                return;
        }
 
-       static_key_slow_inc(&paravirt_ticketlocks_enabled);
-
        pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(xen_lock_spinning);
        pv_lock_ops.unlock_kick = xen_unlock_kick;
 }
 
+/*
+ * The jump_label init code needs to happen _after_ the jump labels are
+ * enabled and before SMP is started; hence we use a pre-SMP initcall level
+ * init. We cannot do it in xen_init_spinlocks as that is done before
+ * jump labels are activated.
+ */
+static __init int xen_init_spinlocks_jump(void)
+{
+       if (!xen_pvspin)
+               return 0;
+
+       static_key_slow_inc(&paravirt_ticketlocks_enabled);
+       return 0;
+}
+early_initcall(xen_init_spinlocks_jump);
+
 static __init int xen_parse_nopvspin(char *arg)
 {
        xen_pvspin = false;
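The two comments added above describe a common split: low-level setup early, jump-label enablement from a pre-SMP initcall once patching is possible. A rough sketch of that pattern using the static-key API of this kernel generation (the my_feature_* names are made up for illustration):

    #include <linux/init.h>
    #include <linux/types.h>
    #include <linux/jump_label.h>

    static bool my_feature_requested = true;
    static struct static_key my_feature_key = STATIC_KEY_INIT_FALSE;

    /* Hot path: compiled as a patched branch, disabled until the key flips. */
    static inline bool my_feature_enabled(void)
    {
            return static_key_false(&my_feature_key);
    }

    /* Pre-SMP initcall: late enough for jump label patching to work. */
    static __init int my_feature_jump_init(void)
    {
            if (!my_feature_requested)
                    return 0;

            static_key_slow_inc(&my_feature_key);   /* enable the branch */
            return 0;
    }
    early_initcall(my_feature_jump_init);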
index c114483010c13b70caf62b3edea8b46947ca17e8..7db5c22faa68a1803cad4708d9047ad8396b637e 100644 (file)
@@ -87,4 +87,6 @@
 
 #define SO_BUSY_POLL           46
 
+#define SO_MAX_PACING_RATE     47
+
 #endif /* _XTENSA_SOCKET_H */
index 7f38e40fee0819a74ec6b3d10c759c07d3b04bb4..2429515c05c2ed1b6f457a60def817c33c178651 100644 (file)
@@ -99,11 +99,16 @@ config BLK_DEV_THROTTLING
 
        See Documentation/cgroups/blkio-controller.txt for more information.
 
-config CMDLINE_PARSER
+config BLK_CMDLINE_PARSER
        bool "Block device command line partition parser"
        default n
        ---help---
-       Parsing command line, get the partitions information.
+       Enabling this option allows you to specify the partition layout from
+       the kernel boot args.  This is typically of use for embedded devices
+       which don't otherwise have any standardized method for listing the
+       partitions on a block device.
+
+       See Documentation/block/cmdline-partition.txt for more information.
 
 menu "Partition Types"
 
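For readers of the help text above: Documentation/block/cmdline-partition.txt describes a boot argument in an mtdparts-like form, roughly

    blkdevparts=mmcblk0:1M(boot),512K(env),-(rootfs)

where each entry is <size>[@<offset>](name) and '-' takes the rest of the device; the device name and layout here are purely illustrative, so see that document for the exact syntax.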
index 4fa4be544ece94c05d0956a0c6f488c1f7a65161..671a83d063a5ba290c93efc08457eba6217ea80d 100644 (file)
@@ -18,4 +18,4 @@ obj-$(CONFIG_IOSCHED_CFQ)     += cfq-iosched.o
 
 obj-$(CONFIG_BLOCK_COMPAT)     += compat_ioctl.o
 obj-$(CONFIG_BLK_DEV_INTEGRITY)        += blk-integrity.o
-obj-$(CONFIG_CMDLINE_PARSER)   += cmdline-parser.o
+obj-$(CONFIG_BLK_CMDLINE_PARSER)       += cmdline-parser.o
index e90c7c164c83b8dc58f36393bb5748c0110e6184..4e491d9b529255476939b8608d91718c40140059 100644 (file)
@@ -235,8 +235,13 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
        blkg->online = true;
        spin_unlock(&blkcg->lock);
 
-       if (!ret)
+       if (!ret) {
+               if (blkcg == &blkcg_root) {
+                       q->root_blkg = blkg;
+                       q->root_rl.blkg = blkg;
+               }
                return blkg;
+       }
 
        /* @blkg failed fully initialized, use the usual release path */
        blkg_put(blkg);
@@ -334,6 +339,15 @@ static void blkg_destroy(struct blkcg_gq *blkg)
        if (rcu_dereference_raw(blkcg->blkg_hint) == blkg)
                rcu_assign_pointer(blkcg->blkg_hint, NULL);
 
+       /*
+        * If the root blkg is destroyed, just clear the pointer since root_rl
+        * does not take a reference on the root blkg.
+        */
+       if (blkcg == &blkcg_root) {
+               blkg->q->root_blkg = NULL;
+               blkg->q->root_rl.blkg = NULL;
+       }
+
        /*
         * Put the reference taken at the time of creation so that when all
         * queues are gone, group can be destroyed.
@@ -360,13 +374,6 @@ static void blkg_destroy_all(struct request_queue *q)
                blkg_destroy(blkg);
                spin_unlock(&blkcg->lock);
        }
-
-       /*
-        * root blkg is destroyed.  Just clear the pointer since
-        * root_rl does not take reference on root blkg.
-        */
-       q->root_blkg = NULL;
-       q->root_rl.blkg = NULL;
 }
 
 /*
@@ -970,8 +977,6 @@ int blkcg_activate_policy(struct request_queue *q,
                ret = PTR_ERR(blkg);
                goto out_unlock;
        }
-       q->root_blkg = blkg;
-       q->root_rl.blkg = blkg;
 
        list_for_each_entry(blkg, &q->blkg_list, q_node)
                cnt++;
index c04505358342e132a0a3164cd154d12d52a7ad98..0a00e4ecf87cae37a3d310c8dfd32869ac24c161 100644 (file)
@@ -1549,11 +1549,9 @@ get_rq:
        if (plug) {
                /*
                 * If this is the first request added after a plug, fire
-                * of a plug trace. If others have been added before, check
-                * if we have multiple devices in this plug. If so, make a
-                * note to sort the list before dispatch.
+                * off a plug trace.
                 */
-               if (list_empty(&plug->list))
+               if (!request_count)
                        trace_block_plug(q);
                else {
                        if (request_count >= BLK_MAX_REQUEST_COUNT) {
index e7062139612914b95917405fc3da52e4498f066d..ae4f27d7944e9a662ddbb51464bd18e10f4707a4 100644 (file)
@@ -68,9 +68,9 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
        spin_lock_irq(q->queue_lock);
 
        if (unlikely(blk_queue_dying(q))) {
+               rq->cmd_flags |= REQ_QUIET;
                rq->errors = -ENXIO;
-               if (rq->end_io)
-                       rq->end_io(rq, rq->errors);
+               __blk_end_request_all(rq, rq->errors);
                spin_unlock_irq(q->queue_lock);
                return;
        }
index dabb9d02cf9a509655f67ae169ac0c3a706d8f8e..434944cbd761884f0f6d9ffdfe96adc00afd1e5c 100644 (file)
@@ -1803,7 +1803,7 @@ static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf,
 
        if (samples) {
                v = blkg_stat_read(&cfqg->stats.avg_queue_size_sum);
-               do_div(v, samples);
+               v = div64_u64(v, samples);
        }
        __blkg_prfill_u64(sf, pd, v);
        return 0;
@@ -4358,7 +4358,7 @@ static int cfq_init_queue(struct request_queue *q, struct elevator_type *e)
        if (!eq)
                return -ENOMEM;
 
-       cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
+       cfqd = kzalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node);
        if (!cfqd) {
                kobject_put(&eq->kobj);
                return -ENOMEM;
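The cfq hunk above replaces do_div() with div64_u64() because the divisor (samples) is a 64-bit value, and do_div() only divides by a 32-bit divisor. A small hedged sketch of the distinction (my_average and its arguments are illustrative):

    #include <linux/types.h>
    #include <linux/math64.h>

    static u64 my_average(u64 sum, u64 samples)
    {
            if (!samples)
                    return 0;

            /*
             * do_div(sum, samples) would be wrong here: it divides a u64 by a
             * 32-bit divisor in place (returning the remainder), so a 64-bit
             * 'samples' would be truncated. div64_u64() does the full
             * 64-by-64 division.
             */
            return div64_u64(sum, samples);
    }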
index 20614a33236220d0e79d99a2cfaf99822327bc57..9ef66406c625412b79296d6e4f9186611b95b6f6 100644 (file)
@@ -346,7 +346,7 @@ static int deadline_init_queue(struct request_queue *q, struct elevator_type *e)
        if (!eq)
                return -ENOMEM;
 
-       dd = kmalloc_node(sizeof(*dd), GFP_KERNEL | __GFP_ZERO, q->node);
+       dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
        if (!dd) {
                kobject_put(&eq->kobj);
                return -ENOMEM;
index 668394d185885bc3fcc878697878a4cb07be899e..2bcbd8cc14d4b23780a3cfc262857a87f7c5d67e 100644 (file)
@@ -155,7 +155,7 @@ struct elevator_queue *elevator_alloc(struct request_queue *q,
 {
        struct elevator_queue *eq;
 
-       eq = kmalloc_node(sizeof(*eq), GFP_KERNEL | __GFP_ZERO, q->node);
+       eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
        if (unlikely(!eq))
                goto err;
 
index dadf42b454a383299231fa46abee1631aab53196..791f419431322882a915f88995df7b72552d5507 100644 (file)
@@ -1252,8 +1252,7 @@ struct gendisk *alloc_disk_node(int minors, int node_id)
 {
        struct gendisk *disk;
 
-       disk = kmalloc_node(sizeof(struct gendisk),
-                               GFP_KERNEL | __GFP_ZERO, node_id);
+       disk = kzalloc_node(sizeof(struct gendisk), GFP_KERNEL, node_id);
        if (disk) {
                if (!init_part_stats(&disk->part0)) {
                        kfree(disk);
index 87a32086535d5228b513af3935d29765b763b2ec..9b29a996c3114a3d09c41abdcaaa0f3f19abe3ff 100644 (file)
@@ -263,7 +263,7 @@ config SYSV68_PARTITION
 
 config CMDLINE_PARTITION
        bool "Command line partition support" if PARTITION_ADVANCED
-       select CMDLINE_PARSER
+       select BLK_CMDLINE_PARSER
        help
-         Say Y here if you would read the partitions table from bootargs.
+         Say Y here if you want to read the partition table from bootargs.
          The format for the command line is just like mtdparts.
index 56cf4ffad51ed903128ac079d9845acafbd639a4..5141b563adf1b9a41c6b6054a8d2dcf5618da4d6 100644 (file)
@@ -2,15 +2,15 @@
  * Copyright (C) 2013 HUAWEI
  * Author: Cai Zhiyong <caizhiyong@huawei.com>
  *
- * Read block device partition table from command line.
- * The partition used for fixed block device (eMMC) embedded device.
- * It is no MBR, save storage space. Bootloader can be easily accessed
+ * Read block device partition table from the command line.
+ * Typically used for fixed block (eMMC) embedded devices.
+ * It has no MBR, so saves storage space. Bootloader can be easily accessed
  * by absolute address of data on the block device.
  * Users can easily change the partition.
  *
  * The format for the command line is just like mtdparts.
  *
- * Verbose config please reference "Documentation/block/cmdline-partition.txt"
+ * For further information, see "Documentation/block/cmdline-partition.txt"
  *
  */
 
index f40acef80269fa9e6a38435b1c328a7c80552b34..a6977e12d5745ab5f2015e2f2879174bddd64bc0 100644 (file)
@@ -39,6 +39,7 @@
 #include <linux/ipmi.h>
 #include <linux/device.h>
 #include <linux/pnp.h>
+#include <linux/spinlock.h>
 
 MODULE_AUTHOR("Zhao Yakui");
 MODULE_DESCRIPTION("ACPI IPMI Opregion driver");
@@ -57,7 +58,7 @@ struct acpi_ipmi_device {
        struct list_head head;
        /* the IPMI request message list */
        struct list_head tx_msg_list;
-       struct mutex    tx_msg_lock;
+       spinlock_t      tx_msg_lock;
        acpi_handle handle;
        struct pnp_dev *pnp_dev;
        ipmi_user_t     user_interface;
@@ -147,6 +148,7 @@ static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg,
        struct kernel_ipmi_msg *msg;
        struct acpi_ipmi_buffer *buffer;
        struct acpi_ipmi_device *device;
+       unsigned long flags;
 
        msg = &tx_msg->tx_message;
        /*
@@ -177,10 +179,10 @@ static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg,
 
        /* Get the msgid */
        device = tx_msg->device;
-       mutex_lock(&device->tx_msg_lock);
+       spin_lock_irqsave(&device->tx_msg_lock, flags);
        device->curr_msgid++;
        tx_msg->tx_msgid = device->curr_msgid;
-       mutex_unlock(&device->tx_msg_lock);
+       spin_unlock_irqrestore(&device->tx_msg_lock, flags);
 }
 
 static void acpi_format_ipmi_response(struct acpi_ipmi_msg *msg,
@@ -242,6 +244,7 @@ static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
        int msg_found = 0;
        struct acpi_ipmi_msg *tx_msg;
        struct pnp_dev *pnp_dev = ipmi_device->pnp_dev;
+       unsigned long flags;
 
        if (msg->user != ipmi_device->user_interface) {
                dev_warn(&pnp_dev->dev, "Unexpected response is returned. "
@@ -250,7 +253,7 @@ static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
                ipmi_free_recv_msg(msg);
                return;
        }
-       mutex_lock(&ipmi_device->tx_msg_lock);
+       spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
        list_for_each_entry(tx_msg, &ipmi_device->tx_msg_list, head) {
                if (msg->msgid == tx_msg->tx_msgid) {
                        msg_found = 1;
@@ -258,7 +261,7 @@ static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
                }
        }
 
-       mutex_unlock(&ipmi_device->tx_msg_lock);
+       spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
        if (!msg_found) {
                dev_warn(&pnp_dev->dev, "Unexpected response (msg id %ld) is "
                        "returned.\n", msg->msgid);
@@ -378,6 +381,7 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
        struct acpi_ipmi_device *ipmi_device = handler_context;
        int err, rem_time;
        acpi_status status;
+       unsigned long flags;
        /*
         * IPMI opregion message.
         * IPMI message is firstly written to the BMC and system software
@@ -395,9 +399,9 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
                return AE_NO_MEMORY;
 
        acpi_format_ipmi_msg(tx_msg, address, value);
-       mutex_lock(&ipmi_device->tx_msg_lock);
+       spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
        list_add_tail(&tx_msg->head, &ipmi_device->tx_msg_list);
-       mutex_unlock(&ipmi_device->tx_msg_lock);
+       spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
        err = ipmi_request_settime(ipmi_device->user_interface,
                                        &tx_msg->addr,
                                        tx_msg->tx_msgid,
@@ -413,9 +417,9 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
        status = AE_OK;
 
 end_label:
-       mutex_lock(&ipmi_device->tx_msg_lock);
+       spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
        list_del(&tx_msg->head);
-       mutex_unlock(&ipmi_device->tx_msg_lock);
+       spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
        kfree(tx_msg);
        return status;
 }
@@ -457,7 +461,7 @@ static void acpi_add_ipmi_device(struct acpi_ipmi_device *ipmi_device)
 
        INIT_LIST_HEAD(&ipmi_device->head);
 
-       mutex_init(&ipmi_device->tx_msg_lock);
+       spin_lock_init(&ipmi_device->tx_msg_lock);
        INIT_LIST_HEAD(&ipmi_device->tx_msg_list);
        ipmi_install_space_handler(ipmi_device);
 
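The acpi_ipmi changes above convert tx_msg_lock from a mutex to a spinlock because the lock is also taken in the IPMI message handler, which can run in atomic context where sleeping locks are not allowed; spin_lock_irqsave() then protects the list from both process and atomic context. A minimal sketch of the pattern with hypothetical my_* names:

    #include <linux/spinlock.h>
    #include <linux/list.h>

    struct my_msg {
            struct list_head head;
            long msgid;
    };

    struct my_dev {
            spinlock_t       tx_lock;       /* protects tx_list and curr_msgid */
            struct list_head tx_list;
            long             curr_msgid;
    };

    static void my_dev_init(struct my_dev *dev)
    {
            spin_lock_init(&dev->tx_lock);
            INIT_LIST_HEAD(&dev->tx_list);
    }

    /* Safe to call from process context and from atomic handlers alike. */
    static void my_queue_msg(struct my_dev *dev, struct my_msg *msg)
    {
            unsigned long flags;

            spin_lock_irqsave(&dev->tx_lock, flags);
            msg->msgid = ++dev->curr_msgid;
            list_add_tail(&msg->head, &dev->tx_list);
            spin_unlock_irqrestore(&dev->tx_lock, flags);
    }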
index fbdb82e70d10623c0bbf94aa73723a40fd32e77d..611ce9061dc54c409160538df58fc3eb2c6c4676 100644 (file)
@@ -1121,7 +1121,7 @@ int acpi_bus_register_driver(struct acpi_driver *driver)
 EXPORT_SYMBOL(acpi_bus_register_driver);
 
 /**
- * acpi_bus_unregister_driver - unregisters a driver with the APIC bus
+ * acpi_bus_unregister_driver - unregisters a driver with the ACPI bus
  * @driver: driver to unregister
  *
  * Unregisters a driver with the ACPI bus.  Searches the namespace for all
index 958ba2a420c34b0e5411b116d639214e700f5c47..97f4acb54ad626795972ffb8e35572101d05d08d 100644 (file)
@@ -2,7 +2,7 @@
  *  sata_promise.c - Promise SATA
  *
  *  Maintained by:  Tejun Heo <tj@kernel.org>
- *                 Mikael Pettersson <mikpe@it.uu.se>
+ *                 Mikael Pettersson
  *                 Please ALWAYS copy linux-ide@vger.kernel.org
  *                 on emails.
  *
index c7cfadcf67521d82f7d977979631e4805f5a31e9..34abf4d8a45ff4f3f6d25787887a8bc787779133 100644 (file)
@@ -2017,7 +2017,7 @@ EXPORT_SYMBOL_GPL(device_move);
  */
 void device_shutdown(void)
 {
-       struct device *dev;
+       struct device *dev, *parent;
 
        spin_lock(&devices_kset->list_lock);
        /*
@@ -2034,7 +2034,7 @@ void device_shutdown(void)
                 * prevent it from being freed because parent's
                 * lock is to be held
                 */
-               get_device(dev->parent);
+               parent = get_device(dev->parent);
                get_device(dev);
                /*
                 * Make sure the device is off the kset list, in the
@@ -2044,8 +2044,8 @@ void device_shutdown(void)
                spin_unlock(&devices_kset->list_lock);
 
                /* hold lock to avoid race with probe/release */
-               if (dev->parent)
-                       device_lock(dev->parent);
+               if (parent)
+                       device_lock(parent);
                device_lock(dev);
 
                /* Don't allow any more runtime suspends */
@@ -2063,11 +2063,11 @@ void device_shutdown(void)
                }
 
                device_unlock(dev);
-               if (dev->parent)
-                       device_unlock(dev->parent);
+               if (parent)
+                       device_unlock(parent);
 
                put_device(dev);
-               put_device(dev->parent);
+               put_device(parent);
 
                spin_lock(&devices_kset->list_lock);
        }
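The device_shutdown() hunk above stores the result of get_device(dev->parent) in a local variable so that the later device_lock()/device_unlock()/put_device() calls act on exactly the reference that was taken, even if dev->parent changes in the meantime. A small hedged sketch of the balanced get/put pattern:

    #include <linux/device.h>

    static void my_shutdown_one(struct device *dev)
    {
            struct device *parent;

            /* get_device(NULL) returns NULL and put_device(NULL) is a no-op,
             * so the pairing stays balanced for parentless devices too. */
            parent = get_device(dev->parent);
            get_device(dev);

            if (parent)
                    device_lock(parent);
            device_lock(dev);

            /* ... invoke the shutdown callbacks here ... */

            device_unlock(dev);
            if (parent)
                    device_unlock(parent);

            put_device(dev);
            put_device(parent);
    }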
index c9fd6943ce456a1123e58106b0f028418d535fdd..50329d1057ed5dc5c929fde89237add36c55ea48 100644 (file)
@@ -210,25 +210,6 @@ static void bcma_core_pci_config_fixup(struct bcma_drv_pci *pc)
        }
 }
 
-static void bcma_core_pci_power_save(struct bcma_drv_pci *pc, bool up)
-{
-       u16 data;
-
-       if (pc->core->id.rev >= 15 && pc->core->id.rev <= 20) {
-               data = up ? 0x74 : 0x7C;
-               bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
-                                        BCMA_CORE_PCI_MDIO_BLK1_MGMT1, 0x7F64);
-               bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
-                                        BCMA_CORE_PCI_MDIO_BLK1_MGMT3, data);
-       } else if (pc->core->id.rev >= 21 && pc->core->id.rev <= 22) {
-               data = up ? 0x75 : 0x7D;
-               bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
-                                        BCMA_CORE_PCI_MDIO_BLK1_MGMT1, 0x7E65);
-               bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
-                                        BCMA_CORE_PCI_MDIO_BLK1_MGMT3, data);
-       }
-}
-
 /**************************************************
  * Init.
  **************************************************/
@@ -255,6 +236,32 @@ void bcma_core_pci_init(struct bcma_drv_pci *pc)
                bcma_core_pci_clientmode_init(pc);
 }
 
+void bcma_core_pci_power_save(struct bcma_bus *bus, bool up)
+{
+       struct bcma_drv_pci *pc;
+       u16 data;
+
+       if (bus->hosttype != BCMA_HOSTTYPE_PCI)
+               return;
+
+       pc = &bus->drv_pci[0];
+
+       if (pc->core->id.rev >= 15 && pc->core->id.rev <= 20) {
+               data = up ? 0x74 : 0x7C;
+               bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
+                                        BCMA_CORE_PCI_MDIO_BLK1_MGMT1, 0x7F64);
+               bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
+                                        BCMA_CORE_PCI_MDIO_BLK1_MGMT3, data);
+       } else if (pc->core->id.rev >= 21 && pc->core->id.rev <= 22) {
+               data = up ? 0x75 : 0x7D;
+               bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
+                                        BCMA_CORE_PCI_MDIO_BLK1_MGMT1, 0x7E65);
+               bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
+                                        BCMA_CORE_PCI_MDIO_BLK1_MGMT3, data);
+       }
+}
+EXPORT_SYMBOL_GPL(bcma_core_pci_power_save);
+
 int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, struct bcma_device *core,
                          bool enable)
 {
@@ -310,8 +317,6 @@ void bcma_core_pci_up(struct bcma_bus *bus)
 
        pc = &bus->drv_pci[0];
 
-       bcma_core_pci_power_save(pc, true);
-
        bcma_core_pci_extend_L1timer(pc, true);
 }
 EXPORT_SYMBOL_GPL(bcma_core_pci_up);
@@ -326,7 +331,5 @@ void bcma_core_pci_down(struct bcma_bus *bus)
        pc = &bus->drv_pci[0];
 
        bcma_core_pci_extend_L1timer(pc, false);
-
-       bcma_core_pci_power_save(pc, false);
 }
 EXPORT_SYMBOL_GPL(bcma_core_pci_down);
index d2d95ff5353b08a4f49d1c3da4179090751851bf..edfa2515bc8613f952c448194bfcebf7835d75c1 100644 (file)
@@ -1189,6 +1189,7 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
        int err;
        u32 cp;
 
+       memset(&arg64, 0, sizeof(arg64));
        err = 0;
        err |=
            copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
index 639d26b90b9117a56c69f991663f603847cc206c..2b944038453681ef15ba61f41e1cfa3a9e885fbe 100644 (file)
@@ -1193,6 +1193,7 @@ out_passthru:
                ida_pci_info_struct pciinfo;
 
                if (!arg) return -EINVAL;
+               memset(&pciinfo, 0, sizeof(pciinfo));
                pciinfo.bus = host->pci_dev->bus->number;
                pciinfo.dev_fn = host->pci_dev->devfn;
                pciinfo.board_id = host->board_id;
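The memset() additions above (cciss and cpqarray) zero the ioctl structures before selected fields are filled in, so uninitialized stack bytes, including compiler-inserted padding, never reach userspace. A hedged sketch of the pattern (struct my_info and its fields are illustrative):

    #include <linux/string.h>
    #include <linux/errno.h>
    #include <linux/uaccess.h>

    struct my_info {
            unsigned char bus;
            unsigned char dev_fn;
            unsigned int  board_id;
            /* padding between fields would otherwise leak stack contents */
    };

    static int my_fill_info(void __user *arg)
    {
            struct my_info info;

            memset(&info, 0, sizeof(info));         /* clears holes/padding too */
            info.bus = 1;
            info.dev_fn = 0;
            info.board_id = 0x1234;

            if (copy_to_user(arg, &info, sizeof(info)))
                    return -EFAULT;
            return 0;
    }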
index a12b923bbaca91f14b923e0b271b00ebc1be8d28..0a327f4154a2b2039ad4ae0e50b7195dfa2cb5f0 100644 (file)
@@ -85,6 +85,7 @@ static struct usb_device_id ath3k_table[] = {
        { USB_DEVICE(0x04CA, 0x3008) },
        { USB_DEVICE(0x13d3, 0x3362) },
        { USB_DEVICE(0x0CF3, 0xE004) },
+       { USB_DEVICE(0x0CF3, 0xE005) },
        { USB_DEVICE(0x0930, 0x0219) },
        { USB_DEVICE(0x0489, 0xe057) },
        { USB_DEVICE(0x13d3, 0x3393) },
@@ -126,6 +127,7 @@ static struct usb_device_id ath3k_blist_tbl[] = {
        { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
index 8e16f0af6358872606fbe102bddc9ade2c6ab470..f3dfc0a88fdcb95e25647caa6cfc14a1d9a5a484 100644 (file)
@@ -102,6 +102,7 @@ static struct usb_device_id btusb_table[] = {
 
        /* Broadcom BCM20702A0 */
        { USB_DEVICE(0x0b05, 0x17b5) },
+       { USB_DEVICE(0x0b05, 0x17cb) },
        { USB_DEVICE(0x04ca, 0x2003) },
        { USB_DEVICE(0x0489, 0xe042) },
        { USB_DEVICE(0x413c, 0x8197) },
@@ -112,6 +113,9 @@ static struct usb_device_id btusb_table[] = {
        /*Broadcom devices with vendor specific id */
        { USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01) },
 
+       /* Belkin F8065bf - Broadcom based */
+       { USB_VENDOR_AND_INTERFACE_INFO(0x050d, 0xff, 0x01, 0x01) },
+
        { }     /* Terminating entry */
 };
 
@@ -148,6 +152,7 @@ static struct usb_device_id blacklist_table[] = {
        { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
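The ath3k/btusb hunks above simply add new vendor/product IDs to the drivers' existing usb_device_id tables, which is how these drivers learn about new hardware revisions. A generic sketch of such a table (the IDs, quirk flag and names below are placeholders, not from the patch):

    #include <linux/module.h>
    #include <linux/usb.h>

    #define MY_QUIRK_FOO   0x01

    static const struct usb_device_id my_table[] = {
            { USB_DEVICE(0x1234, 0x5678) },                           /* plain match */
            { USB_DEVICE(0x1234, 0x9abc), .driver_info = MY_QUIRK_FOO },
            { }     /* terminating entry */
    };
    MODULE_DEVICE_TABLE(usb, my_table);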
index 7a7929ba26588dbf07d014bf8bb3cf8f55da509a..06189e55b4e5a0479d2eaab0a31610ef91bf472f 100644 (file)
@@ -142,32 +142,6 @@ static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
        return length;
 }
 
-ssize_t tpm_show_locality(struct device *dev, struct device_attribute *attr,
-                         char *buf)
-{
-       struct tpm_chip *chip = dev_get_drvdata(dev);
-       struct tpm_private *priv = TPM_VPRIV(chip);
-       u8 locality = priv->shr->locality;
-
-       return sprintf(buf, "%d\n", locality);
-}
-
-ssize_t tpm_store_locality(struct device *dev, struct device_attribute *attr,
-                       const char *buf, size_t len)
-{
-       struct tpm_chip *chip = dev_get_drvdata(dev);
-       struct tpm_private *priv = TPM_VPRIV(chip);
-       u8 val;
-
-       int rv = kstrtou8(buf, 0, &val);
-       if (rv)
-               return rv;
-
-       priv->shr->locality = val;
-
-       return len;
-}
-
 static const struct file_operations vtpm_ops = {
        .owner = THIS_MODULE,
        .llseek = no_llseek,
@@ -188,8 +162,6 @@ static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
 static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
 static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
 static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
-static DEVICE_ATTR(locality, S_IRUGO | S_IWUSR, tpm_show_locality,
-               tpm_store_locality);
 
 static struct attribute *vtpm_attrs[] = {
        &dev_attr_pubek.attr,
@@ -202,7 +174,6 @@ static struct attribute *vtpm_attrs[] = {
        &dev_attr_cancel.attr,
        &dev_attr_durations.attr,
        &dev_attr_timeouts.attr,
-       &dev_attr_locality.attr,
        NULL,
 };
 
@@ -210,8 +181,6 @@ static struct attribute_group vtpm_attr_grp = {
        .attrs = vtpm_attrs,
 };
 
-#define TPM_LONG_TIMEOUT   (10 * 60 * HZ)
-
 static const struct tpm_vendor_specific tpm_vtpm = {
        .status = vtpm_status,
        .recv = vtpm_recv,
@@ -224,11 +193,6 @@ static const struct tpm_vendor_specific tpm_vtpm = {
        .miscdev = {
                .fops = &vtpm_ops,
        },
-       .duration = {
-               TPM_LONG_TIMEOUT,
-               TPM_LONG_TIMEOUT,
-               TPM_LONG_TIMEOUT,
-       },
 };
 
 static irqreturn_t tpmif_interrupt(int dummy, void *dev_id)
index 41c69469ce2000ec223170970c856a2287f10db2..971d796e071d889c290029ada3b2dc862f10fcbb 100644 (file)
@@ -26,6 +26,7 @@ config DW_APB_TIMER_OF
 
 config ARMADA_370_XP_TIMER
        bool
+       select CLKSRC_OF
 
 config ORION_TIMER
        select CLKSRC_OF
index 37f5325bec95936260c50b2a099ccc7fb000ba53..b9ddd9e3a2f599e2cc7424c1eac18d4280b2f850 100644 (file)
@@ -30,6 +30,9 @@ void __init clocksource_of_init(void)
        clocksource_of_init_fn init_func;
 
        for_each_matching_node_and_match(np, __clksrc_of_table, &match) {
+               if (!of_device_is_available(np))
+                       continue;
+
                init_func = match->data;
                init_func(np);
        }
index b9c81b7c3a3bfa5a251129e42c687b1e0aeeb851..3a5909c12d420dbbb6f54f011882303ba4a48a62 100644 (file)
@@ -301,7 +301,7 @@ static void em_sti_register_clockevent(struct em_sti_priv *p)
        ced->name = dev_name(&p->pdev->dev);
        ced->features = CLOCK_EVT_FEAT_ONESHOT;
        ced->rating = 200;
-       ced->cpumask = cpumask_of(0);
+       ced->cpumask = cpu_possible_mask;
        ced->set_next_event = em_sti_clock_event_next;
        ced->set_mode = em_sti_clock_event_mode;
 
index 5b34768f4d7c79f68253966acc8e8268e53f318e..62b0de6a18370fade34eca20205557bd5871cc3a 100644 (file)
@@ -428,7 +428,6 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt)
                                evt->irq);
                        return -EIO;
                }
-               irq_set_affinity(evt->irq, cpumask_of(cpu));
        } else {
                enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0);
        }
@@ -449,6 +448,7 @@ static int exynos4_mct_cpu_notify(struct notifier_block *self,
                                           unsigned long action, void *hcpu)
 {
        struct mct_clock_event_device *mevt;
+       unsigned int cpu;
 
        /*
         * Grab cpu pointer in each case to avoid spurious
@@ -459,6 +459,12 @@ static int exynos4_mct_cpu_notify(struct notifier_block *self,
                mevt = this_cpu_ptr(&percpu_mct_tick);
                exynos4_local_timer_setup(&mevt->evt);
                break;
+       case CPU_ONLINE:
+               cpu = (unsigned long)hcpu;
+               if (mct_int_type == MCT_INT_SPI)
+                       irq_set_affinity(mct_irqs[MCT_L0_IRQ + cpu],
+                                               cpumask_of(cpu));
+               break;
        case CPU_DYING:
                mevt = this_cpu_ptr(&percpu_mct_tick);
                exynos4_local_timer_stop(&mevt->evt);
@@ -500,6 +506,8 @@ static void __init exynos4_timer_resources(struct device_node *np, void __iomem
                                         &percpu_mct_tick);
                WARN(err, "MCT: can't request IRQ %d (%d)\n",
                     mct_irqs[MCT_L0_IRQ], err);
+       } else {
+               irq_set_affinity(mct_irqs[MCT_L0_IRQ], cpumask_of(0));
        }
 
        err = register_cpu_notifier(&exynos4_mct_cpu_nb);
index a1260b4549db647336192d24b3471b6b627897af..d2c3253e015ee23f107d2d34333c6afa533ab7cf 100644 (file)
@@ -986,6 +986,10 @@ static int __init acpi_cpufreq_init(void)
 {
        int ret;
 
+       /* don't keep reloading if cpufreq_driver exists */
+       if (cpufreq_get_current_driver())
+               return 0;
+
        if (acpi_disabled)
                return 0;
 
index cbfffa91ebdd46054d21f5c4138909b549787b06..78c49d8e0f4a91a0d1815ac485b3be457d04f117 100644 (file)
@@ -12,6 +12,7 @@
 #define pr_fmt(fmt)    KBUILD_MODNAME ": " fmt
 
 #include <linux/clk.h>
+#include <linux/cpu.h>
 #include <linux/cpufreq.h>
 #include <linux/err.h>
 #include <linux/module.h>
@@ -177,7 +178,11 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
        struct device_node *np;
        int ret;
 
-       cpu_dev = &pdev->dev;
+       cpu_dev = get_cpu_device(0);
+       if (!cpu_dev) {
+               pr_err("failed to get cpu0 device\n");
+               return -ENODEV;
+       }
 
        np = of_node_get(cpu_dev->of_node);
        if (!np) {
index 43c24aa756f6f29935af8eb0614945a442a36a19..04548f7023af68dee6b36fc590ec59f02c0216f7 100644 (file)
@@ -952,9 +952,20 @@ static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
        if (cpu == policy->cpu)
                return;
 
+       /*
+        * Take direct locks as lock_policy_rwsem_write wouldn't work here.
+        * Also, locking the last cpu is enough here as contention will happen
+        * only after policy->cpu is changed; after that, other threads will try
+        * to acquire the lock for the new cpu, and the policy is already
+        * updated by then.
+        */
+       down_write(&per_cpu(cpu_policy_rwsem, policy->cpu));
+
        policy->last_cpu = policy->cpu;
        policy->cpu = cpu;
 
+       up_write(&per_cpu(cpu_policy_rwsem, policy->last_cpu));
+
 #ifdef CONFIG_CPU_FREQ_TABLE
        cpufreq_frequency_table_update_policy_cpu(policy);
 #endif
@@ -1125,7 +1136,7 @@ static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
        int ret;
 
        /* first sibling now owns the new sysfs dir */
-       cpu_dev = get_cpu_device(cpumask_first(policy->cpus));
+       cpu_dev = get_cpu_device(cpumask_any_but(policy->cpus, old_cpu));
 
        /* Don't touch sysfs files during light-weight tear-down */
        if (frozen)
@@ -1189,12 +1200,9 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
                        policy->governor->name, CPUFREQ_NAME_LEN);
 #endif
 
-       WARN_ON(lock_policy_rwsem_write(cpu));
+       lock_policy_rwsem_read(cpu);
        cpus = cpumask_weight(policy->cpus);
-
-       if (cpus > 1)
-               cpumask_clear_cpu(cpu, policy->cpus);
-       unlock_policy_rwsem_write(cpu);
+       unlock_policy_rwsem_read(cpu);
 
        if (cpu != policy->cpu) {
                if (!frozen)
@@ -1203,9 +1211,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
 
                new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu, frozen);
                if (new_cpu >= 0) {
-                       WARN_ON(lock_policy_rwsem_write(cpu));
                        update_policy_cpu(policy, new_cpu);
-                       unlock_policy_rwsem_write(cpu);
 
                        if (!frozen) {
                                pr_debug("%s: policy Kobject moved to cpu: %d "
@@ -1237,9 +1243,12 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
                return -EINVAL;
        }
 
-       lock_policy_rwsem_read(cpu);
+       WARN_ON(lock_policy_rwsem_write(cpu));
        cpus = cpumask_weight(policy->cpus);
-       unlock_policy_rwsem_read(cpu);
+
+       if (cpus > 1)
+               cpumask_clear_cpu(cpu, policy->cpus);
+       unlock_policy_rwsem_write(cpu);
 
        /* If cpu is last user of policy, free policy */
        if (cpus == 1) {
@@ -1451,6 +1460,9 @@ unsigned int cpufreq_get(unsigned int cpu)
 {
        unsigned int ret_freq = 0;
 
+       if (cpufreq_disabled() || !cpufreq_driver)
+               return -ENOENT;
+
        if (!down_read_trylock(&cpufreq_rwsem))
                return 0;
 
@@ -2095,7 +2107,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
        write_lock_irqsave(&cpufreq_driver_lock, flags);
        if (cpufreq_driver) {
                write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-               return -EBUSY;
+               return -EEXIST;
        }
        cpufreq_driver = driver_data;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);
index d514c152fd1a43041e8ff570fca7f9cc2b28f58d..be5380ecdcd43f95c4aa88a62389c184597f3971 100644 (file)
@@ -457,7 +457,7 @@ err_free_table:
        opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
 err_put_node:
        of_node_put(np);
-       dev_err(dvfs_info->dev, "%s: failed initialization\n", __func__);
+       dev_err(&pdev->dev, "%s: failed initialization\n", __func__);
        return ret;
 }
 
index 3e396543aea4f74bb6b2d54e5a839eedf43a2c1a..c3fd2a101ca02852c677d021c567b27ee74b075a 100644 (file)
@@ -7,6 +7,7 @@
  */
 
 #include <linux/clk.h>
+#include <linux/cpu.h>
 #include <linux/cpufreq.h>
 #include <linux/delay.h>
 #include <linux/err.h>
@@ -202,7 +203,11 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
        unsigned long min_volt, max_volt;
        int num, ret;
 
-       cpu_dev = &pdev->dev;
+       cpu_dev = get_cpu_device(0);
+       if (!cpu_dev) {
+               pr_err("failed to get cpu0 device\n");
+               return -ENODEV;
+       }
 
        np = of_node_get(cpu_dev->of_node);
        if (!np) {
index b4fb86d89850a31c3cc9424f4b4148831c59922f..224ff965bcf7de624c3b62b6db153bda090ea4a9 100644 (file)
 
 #include <drm/drmP.h>
 
+/******************************************************************/
+/** \name Context bitmap support */
+/*@{*/
+
 /**
  * Free a handle from the context bitmap.
  *
  * in drm_device::ctx_idr, while holding the drm_device::struct_mutex
  * lock.
  */
-static void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
+void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
 {
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
-               return;
-
        mutex_lock(&dev->struct_mutex);
        idr_remove(&dev->ctx_idr, ctx_handle);
        mutex_unlock(&dev->struct_mutex);
 }
 
-/******************************************************************/
-/** \name Context bitmap support */
-/*@{*/
-
-void drm_legacy_ctxbitmap_release(struct drm_device *dev,
-                                 struct drm_file *file_priv)
-{
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
-               return;
-
-       mutex_lock(&dev->ctxlist_mutex);
-       if (!list_empty(&dev->ctxlist)) {
-               struct drm_ctx_list *pos, *n;
-
-               list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
-                       if (pos->tag == file_priv &&
-                           pos->handle != DRM_KERNEL_CONTEXT) {
-                               if (dev->driver->context_dtor)
-                                       dev->driver->context_dtor(dev,
-                                                                 pos->handle);
-
-                               drm_ctxbitmap_free(dev, pos->handle);
-
-                               list_del(&pos->head);
-                               kfree(pos);
-                               --dev->ctx_count;
-                       }
-               }
-       }
-       mutex_unlock(&dev->ctxlist_mutex);
-}
-
 /**
  * Context bitmap allocation.
  *
@@ -121,12 +90,10 @@ static int drm_ctxbitmap_next(struct drm_device * dev)
  *
  * Initialise the drm_device::ctx_idr
  */
-void drm_legacy_ctxbitmap_init(struct drm_device * dev)
+int drm_ctxbitmap_init(struct drm_device * dev)
 {
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
-               return;
-
        idr_init(&dev->ctx_idr);
+       return 0;
 }
 
 /**
@@ -137,7 +104,7 @@ void drm_legacy_ctxbitmap_init(struct drm_device * dev)
  * Free all idr members using drm_ctx_sarea_free helper function
  * while holding the drm_device::struct_mutex lock.
  */
-void drm_legacy_ctxbitmap_cleanup(struct drm_device * dev)
+void drm_ctxbitmap_cleanup(struct drm_device * dev)
 {
        mutex_lock(&dev->struct_mutex);
        idr_destroy(&dev->ctx_idr);
@@ -169,9 +136,6 @@ int drm_getsareactx(struct drm_device *dev, void *data,
        struct drm_local_map *map;
        struct drm_map_list *_entry;
 
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
-               return -EINVAL;
-
        mutex_lock(&dev->struct_mutex);
 
        map = idr_find(&dev->ctx_idr, request->ctx_id);
@@ -216,9 +180,6 @@ int drm_setsareactx(struct drm_device *dev, void *data,
        struct drm_local_map *map = NULL;
        struct drm_map_list *r_list = NULL;
 
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
-               return -EINVAL;
-
        mutex_lock(&dev->struct_mutex);
        list_for_each_entry(r_list, &dev->maplist, head) {
                if (r_list->map
@@ -319,9 +280,6 @@ int drm_resctx(struct drm_device *dev, void *data,
        struct drm_ctx ctx;
        int i;
 
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
-               return -EINVAL;
-
        if (res->count >= DRM_RESERVED_CONTEXTS) {
                memset(&ctx, 0, sizeof(ctx));
                for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
@@ -352,9 +310,6 @@ int drm_addctx(struct drm_device *dev, void *data,
        struct drm_ctx_list *ctx_entry;
        struct drm_ctx *ctx = data;
 
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
-               return -EINVAL;
-
        ctx->handle = drm_ctxbitmap_next(dev);
        if (ctx->handle == DRM_KERNEL_CONTEXT) {
                /* Skip kernel's context and get a new one. */
@@ -398,9 +353,6 @@ int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
        struct drm_ctx *ctx = data;
 
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
-               return -EINVAL;
-
        /* This is 0, because we don't handle any context flags */
        ctx->flags = 0;
 
@@ -423,9 +375,6 @@ int drm_switchctx(struct drm_device *dev, void *data,
 {
        struct drm_ctx *ctx = data;
 
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
-               return -EINVAL;
-
        DRM_DEBUG("%d\n", ctx->handle);
        return drm_context_switch(dev, dev->last_context, ctx->handle);
 }
@@ -446,9 +395,6 @@ int drm_newctx(struct drm_device *dev, void *data,
 {
        struct drm_ctx *ctx = data;
 
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
-               return -EINVAL;
-
        DRM_DEBUG("%d\n", ctx->handle);
        drm_context_switch_complete(dev, file_priv, ctx->handle);
 
@@ -471,9 +417,6 @@ int drm_rmctx(struct drm_device *dev, void *data,
 {
        struct drm_ctx *ctx = data;
 
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
-               return -EINVAL;
-
        DRM_DEBUG("%d\n", ctx->handle);
        if (ctx->handle != DRM_KERNEL_CONTEXT) {
                if (dev->driver->context_dtor)
index 4be8e09a32ef730db4740f6f69e52f91ca829112..3f84277d7036b3f843120fb4145dc081010d3d0d 100644 (file)
@@ -439,7 +439,26 @@ int drm_release(struct inode *inode, struct file *filp)
        if (dev->driver->driver_features & DRIVER_GEM)
                drm_gem_release(dev, file_priv);
 
-       drm_legacy_ctxbitmap_release(dev, file_priv);
+       mutex_lock(&dev->ctxlist_mutex);
+       if (!list_empty(&dev->ctxlist)) {
+               struct drm_ctx_list *pos, *n;
+
+               list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
+                       if (pos->tag == file_priv &&
+                           pos->handle != DRM_KERNEL_CONTEXT) {
+                               if (dev->driver->context_dtor)
+                                       dev->driver->context_dtor(dev,
+                                                                 pos->handle);
+
+                               drm_ctxbitmap_free(dev, pos->handle);
+
+                               list_del(&pos->head);
+                               kfree(pos);
+                               --dev->ctx_count;
+                       }
+               }
+       }
+       mutex_unlock(&dev->ctxlist_mutex);
 
        mutex_lock(&dev->struct_mutex);
 
index e7eb0276f7f1968e6c71997eab517fd3a6746fac..39d864576be4a4d5f5957cdbf1e3112b1322a018 100644 (file)
@@ -292,7 +292,13 @@ int drm_fill_in_dev(struct drm_device *dev,
                        goto error_out_unreg;
        }
 
-       drm_legacy_ctxbitmap_init(dev);
+
+
+       retcode = drm_ctxbitmap_init(dev);
+       if (retcode) {
+               DRM_ERROR("Cannot allocate memory for context bitmap.\n");
+               goto error_out_unreg;
+       }
 
        if (driver->driver_features & DRIVER_GEM) {
                retcode = drm_gem_init(dev);
@@ -446,7 +452,7 @@ void drm_put_dev(struct drm_device *dev)
                drm_rmmap(dev, r_list->map);
        drm_ht_remove(&dev->map_hash);
 
-       drm_legacy_ctxbitmap_cleanup(dev);
+       drm_ctxbitmap_cleanup(dev);
 
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                drm_put_minor(&dev->control);
index 4752f223e5b28a2fb793eb5b83be0fefb89a7ac2..45b6ef595965b7cb7391125d8b3a4caf2703b2b5 100644 (file)
@@ -56,7 +56,7 @@ config DRM_EXYNOS_IPP
 
 config DRM_EXYNOS_FIMC
        bool "Exynos DRM FIMC"
-       depends on DRM_EXYNOS_IPP && MFD_SYSCON && OF
+       depends on DRM_EXYNOS_IPP && MFD_SYSCON
        help
          Choose this option if you want to use Exynos FIMC for DRM.
 
index 3445a0f3a6b29dc6110d0e3a950a23de92a4098c..9c8088462c26f970d42b2fb40dae86490e51d3f6 100644 (file)
@@ -63,7 +63,8 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
                        return -ENOMEM;
                }
 
-               buf->kvaddr = dma_alloc_attrs(dev->dev, buf->size,
+               buf->kvaddr = (void __iomem *)dma_alloc_attrs(dev->dev,
+                                       buf->size,
                                        &buf->dma_addr, GFP_KERNEL,
                                        &buf->dma_attrs);
                if (!buf->kvaddr) {
@@ -90,9 +91,9 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
        }
 
        buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages);
-       if (!buf->sgt) {
+       if (IS_ERR(buf->sgt)) {
                DRM_ERROR("failed to get sg table.\n");
-               ret = -ENOMEM;
+               ret = PTR_ERR(buf->sgt);
                goto err_free_attrs;
        }
 
index 78e868bcf1ecc364638e4ce3343cc66f260599f0..e7c2f2d07f193b0393052be2e1bf32921b804da9 100644 (file)
@@ -99,12 +99,13 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
                if (is_drm_iommu_supported(dev)) {
                        unsigned int nr_pages = buffer->size >> PAGE_SHIFT;
 
-                       buffer->kvaddr = vmap(buffer->pages, nr_pages, VM_MAP,
+                       buffer->kvaddr = (void __iomem *) vmap(buffer->pages,
+                                       nr_pages, VM_MAP,
                                        pgprot_writecombine(PAGE_KERNEL));
                } else {
                        phys_addr_t dma_addr = buffer->dma_addr;
                        if (dma_addr)
-                               buffer->kvaddr = phys_to_virt(dma_addr);
+                               buffer->kvaddr = (void __iomem *)phys_to_virt(dma_addr);
                        else
                                buffer->kvaddr = (void __iomem *)NULL;
                }
index b1f8fc69023fd97f9ee2ac28b3aaf35760aa3323..60e84043aa348fe3ec4293507edeb6b9477e3dc2 100644 (file)
@@ -707,8 +707,7 @@ tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
                reg_write(encoder, REG_VIP_CNTRL_2, priv->vip_cntrl_2);
                break;
        case DRM_MODE_DPMS_OFF:
-               /* disable audio and video ports */
-               reg_write(encoder, REG_ENA_AP, 0x00);
+               /* disable video ports */
                reg_write(encoder, REG_ENA_VP_0, 0x00);
                reg_write(encoder, REG_ENA_VP_1, 0x00);
                reg_write(encoder, REG_ENA_VP_2, 0x00);
index 8507c6d1e642d872c48f41834a5dbeca53d99c6b..cdfb9da0e4ce944529a329ce29f92d391e19973a 100644 (file)
@@ -1392,14 +1392,11 @@ out:
                if (i915_terminally_wedged(&dev_priv->gpu_error))
                        return VM_FAULT_SIGBUS;
        case -EAGAIN:
-               /* Give the error handler a chance to run and move the
-                * objects off the GPU active list. Next time we service the
-                * fault, we should be able to transition the page into the
-                * GTT without touching the GPU (and so avoid further
-                * EIO/EGAIN). If the GPU is wedged, then there is no issue
-                * with coherency, just lost writes.
+               /*
+                * EAGAIN means the gpu is hung and we'll wait for the error
+                * handler to reset everything when re-faulting in
+                * i915_mutex_lock_interruptible.
                 */
-               set_need_resched();
        case 0:
        case -ERESTARTSYS:
        case -EINTR:
@@ -4803,10 +4800,10 @@ i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
 
        if (!mutex_trylock(&dev->struct_mutex)) {
                if (!mutex_is_locked_by(&dev->struct_mutex, current))
-                       return SHRINK_STOP;
+                       return 0;
 
                if (dev_priv->mm.shrinker_no_lock_stealing)
-                       return SHRINK_STOP;
+                       return 0;
 
                unlock = false;
        }
@@ -4904,10 +4901,10 @@ i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
 
        if (!mutex_trylock(&dev->struct_mutex)) {
                if (!mutex_is_locked_by(&dev->struct_mutex, current))
-                       return 0;
+                       return SHRINK_STOP;
 
                if (dev_priv->mm.shrinker_no_lock_stealing)
-                       return 0;
+                       return SHRINK_STOP;
 
                unlock = false;
        }
index aba9d7498996c29845e6691ac4eff83c8fe85223..dae364f0028cc94d58f87613449b41d7620d7f29 100644 (file)
@@ -143,8 +143,10 @@ static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
 
        /* Seek the first printf which hits the start position */
        if (e->pos < e->start) {
-               len = vsnprintf(NULL, 0, f, args);
-               if (!__i915_error_seek(e, len))
+               va_list tmp;
+
+               va_copy(tmp, args);
+               if (!__i915_error_seek(e, vsnprintf(NULL, 0, f, tmp)))
                        return;
        }
 
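The i915 error-state fix above exists because vsnprintf() consumes its va_list: measuring the length first and then formatting with the same list is undefined behaviour, so a va_copy() is needed for the dry run. The same rule holds in plain C, as in this standalone sketch:

    #include <stdarg.h>
    #include <stdio.h>

    /* Returns how many characters the formatted output needs. */
    static int measure_then_print(const char *fmt, ...)
    {
            va_list args, tmp;
            int len;

            va_start(args, fmt);

            va_copy(tmp, args);             /* dry run works on a copy */
            len = vsnprintf(NULL, 0, fmt, tmp);
            va_end(tmp);

            vprintf(fmt, args);             /* 'args' is still usable here */
            va_end(args);

            return len;
    }

    int main(void)
    {
            printf("needed %d characters\n",
                   measure_then_print("hello %s, %d\n", "world", 42));
            return 0;
    }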
index 83cce0cdb7691a9aa6024defc01c869a57b4a90a..4b91228fd9bd8e50e1319816a9fe53b6f166ec68 100644 (file)
@@ -1469,6 +1469,34 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
        return ret;
 }
 
+static void i915_error_wake_up(struct drm_i915_private *dev_priv,
+                              bool reset_completed)
+{
+       struct intel_ring_buffer *ring;
+       int i;
+
+       /*
+        * Notify all waiters for GPU completion events that reset state has
+        * been changed, and that they need to restart their wait after
+        * checking for potential errors (and bail out to drop locks if there is
+        * a gpu reset pending so that i915_error_work_func can acquire them).
+        */
+
+       /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
+       for_each_ring(ring, dev_priv, i)
+               wake_up_all(&ring->irq_queue);
+
+       /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
+       wake_up_all(&dev_priv->pending_flip_queue);
+
+       /*
+        * Signal tasks blocked in i915_gem_wait_for_error that the pending
+        * reset state is cleared.
+        */
+       if (reset_completed)
+               wake_up_all(&dev_priv->gpu_error.reset_queue);
+}
+
 /**
  * i915_error_work_func - do process context error handling work
  * @work: work struct
@@ -1483,11 +1511,10 @@ static void i915_error_work_func(struct work_struct *work)
        drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
                                                    gpu_error);
        struct drm_device *dev = dev_priv->dev;
-       struct intel_ring_buffer *ring;
        char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
        char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
        char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
-       int i, ret;
+       int ret;
 
        kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
 
@@ -1506,8 +1533,16 @@ static void i915_error_work_func(struct work_struct *work)
                kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
                                   reset_event);
 
+               /*
+                * All state reset _must_ be completed before we update the
+                * reset counter, for otherwise waiters might miss the reset
+                * pending state and not properly drop locks, resulting in
+                * deadlocks with the reset work.
+                */
                ret = i915_reset(dev);
 
+               intel_display_handle_reset(dev);
+
                if (ret == 0) {
                        /*
                         * After all the gem state is reset, increment the reset
@@ -1528,12 +1563,11 @@ static void i915_error_work_func(struct work_struct *work)
                        atomic_set(&error->reset_counter, I915_WEDGED);
                }
 
-               for_each_ring(ring, dev_priv, i)
-                       wake_up_all(&ring->irq_queue);
-
-               intel_display_handle_reset(dev);
-
-               wake_up_all(&dev_priv->gpu_error.reset_queue);
+               /*
+                * Note: The wake_up also serves as a memory barrier so that
+                * waiters see the updated value of the reset counter atomic_t.
+                */
+               i915_error_wake_up(dev_priv, true);
        }
 }
 
@@ -1642,8 +1676,6 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
 void i915_handle_error(struct drm_device *dev, bool wedged)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ring_buffer *ring;
-       int i;
 
        i915_capture_error_state(dev);
        i915_report_and_clear_eir(dev);
@@ -1653,11 +1685,19 @@ void i915_handle_error(struct drm_device *dev, bool wedged)
                                &dev_priv->gpu_error.reset_counter);
 
                /*
-                * Wakeup waiting processes so that the reset work item
-                * doesn't deadlock trying to grab various locks.
+                * Wakeup waiting processes so that the reset work function
+                * i915_error_work_func doesn't deadlock trying to grab various
+                * locks. By bumping the reset counter first, the woken
+                * processes will see a reset in progress and back off,
+                * releasing their locks and then wait for the reset completion.
+                * We must do this for _all_ gpu waiters that might hold locks
+                * that the reset work needs to acquire.
+                *
+                * Note: The wake_up serves as the required memory barrier to
+                * ensure that the waiters see the updated value of the reset
+                * counter atomic_t.
                 */
-               for_each_ring(ring, dev_priv, i)
-                       wake_up_all(&ring->irq_queue);
+               i915_error_wake_up(dev_priv, false);
        }
 
        /*
index 63aca49d11a843a6ad6ae1a62dc11d0b3bfd470c..63de2701b97403a82ffd221424d5b5b9acda5843 100644 (file)
@@ -778,7 +778,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
                        /* Can only use the always-on power well for eDP when
                         * not using the panel fitter, and when not using motion
                          * blur mitigation (which we don't support). */
-                       if (intel_crtc->config.pch_pfit.size)
+                       if (intel_crtc->config.pch_pfit.enabled)
                                temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
                        else
                                temp |= TRANS_DDI_EDP_INPUT_A_ON;
index 2489d0b4c7d2db8a8b5d74c04107f1b405c8ad35..e5822e79f912d9d447901f7dbf71fe911b5ade30 100644 (file)
@@ -2249,7 +2249,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
                I915_WRITE(PIPESRC(intel_crtc->pipe),
                           ((crtc->mode.hdisplay - 1) << 16) |
                           (crtc->mode.vdisplay - 1));
-               if (!intel_crtc->config.pch_pfit.size &&
+               if (!intel_crtc->config.pch_pfit.enabled &&
                    (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
                     intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
                        I915_WRITE(PF_CTL(intel_crtc->pipe), 0);
@@ -3203,7 +3203,7 @@ static void ironlake_pfit_enable(struct intel_crtc *crtc)
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipe = crtc->pipe;
 
-       if (crtc->config.pch_pfit.size) {
+       if (crtc->config.pch_pfit.enabled) {
                /* Force use of hard-coded filter coefficients
                 * as some pre-programmed values are broken,
                 * e.g. x201.
@@ -3428,7 +3428,7 @@ static void ironlake_pfit_disable(struct intel_crtc *crtc)
 
        /* To avoid upsetting the power well on haswell only disable the pfit if
         * it's in use. The hw state code will make sure we get this right. */
-       if (crtc->config.pch_pfit.size) {
+       if (crtc->config.pch_pfit.enabled) {
                I915_WRITE(PF_CTL(pipe), 0);
                I915_WRITE(PF_WIN_POS(pipe), 0);
                I915_WRITE(PF_WIN_SZ(pipe), 0);
@@ -4775,6 +4775,10 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
 
        pipeconf = 0;
 
+       if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
+           I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE)
+               pipeconf |= PIPECONF_ENABLE;
+
        if (intel_crtc->pipe == 0 && INTEL_INFO(dev)->gen < 4) {
                /* Enable pixel doubling when the dot clock is > 90% of the (display)
                 * core speed.
@@ -4877,9 +4881,6 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
                return -EINVAL;
        }
 
-       /* Ensure that the cursor is valid for the new mode before changing... */
-       intel_crtc_update_cursor(crtc, true);
-
        if (is_lvds && dev_priv->lvds_downclock_avail) {
                /*
                 * Ensure we match the reduced clock's P to the target clock.
@@ -5768,9 +5769,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
                intel_crtc->config.dpll.p2 = clock.p2;
        }
 
-       /* Ensure that the cursor is valid for the new mode before changing... */
-       intel_crtc_update_cursor(crtc, true);
-
        /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
        if (intel_crtc->config.has_pch_encoder) {
                fp = i9xx_dpll_compute_fp(&intel_crtc->config.dpll);
@@ -5859,6 +5857,7 @@ static void ironlake_get_pfit_config(struct intel_crtc *crtc,
        tmp = I915_READ(PF_CTL(crtc->pipe));
 
        if (tmp & PF_ENABLE) {
+               pipe_config->pch_pfit.enabled = true;
                pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
                pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
 
@@ -6236,7 +6235,7 @@ static void haswell_modeset_global_resources(struct drm_device *dev)
                if (!crtc->base.enabled)
                        continue;
 
-               if (crtc->pipe != PIPE_A || crtc->config.pch_pfit.size ||
+               if (crtc->pipe != PIPE_A || crtc->config.pch_pfit.enabled ||
                    crtc->config.cpu_transcoder != TRANSCODER_EDP)
                        enable = true;
        }
@@ -6259,9 +6258,6 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
        if (!intel_ddi_pll_mode_set(crtc))
                return -EINVAL;
 
-       /* Ensure that the cursor is valid for the new mode before changing... */
-       intel_crtc_update_cursor(crtc, true);
-
        if (intel_crtc->config.has_dp_encoder)
                intel_dp_set_m_n(intel_crtc);
 
@@ -6494,15 +6490,15 @@ static void haswell_write_eld(struct drm_connector *connector,
 
        /* Set ELD valid state */
        tmp = I915_READ(aud_cntrl_st2);
-       DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%8x\n", tmp);
+       DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%08x\n", tmp);
        tmp |= (AUDIO_ELD_VALID_A << (pipe * 4));
        I915_WRITE(aud_cntrl_st2, tmp);
        tmp = I915_READ(aud_cntrl_st2);
-       DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%8x\n", tmp);
+       DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%08x\n", tmp);
 
        /* Enable HDMI mode */
        tmp = I915_READ(aud_config);
-       DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%8x\n", tmp);
+       DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%08x\n", tmp);
        /* clear N_programing_enable and N_value_index */
        tmp &= ~(AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE);
        I915_WRITE(aud_config, tmp);
@@ -6937,7 +6933,8 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
        intel_crtc->cursor_width = width;
        intel_crtc->cursor_height = height;
 
-       intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
+       if (intel_crtc->active)
+               intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
 
        return 0;
 fail_unpin:
@@ -6956,7 +6953,8 @@ static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
        intel_crtc->cursor_x = x;
        intel_crtc->cursor_y = y;
 
-       intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
+       if (intel_crtc->active)
+               intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
 
        return 0;
 }
@@ -8205,9 +8203,10 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
                      pipe_config->gmch_pfit.control,
                      pipe_config->gmch_pfit.pgm_ratios,
                      pipe_config->gmch_pfit.lvds_border_bits);
-       DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x\n",
+       DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
                      pipe_config->pch_pfit.pos,
-                     pipe_config->pch_pfit.size);
+                     pipe_config->pch_pfit.size,
+                     pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
        DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
 }
 
@@ -8603,8 +8602,11 @@ intel_pipe_config_compare(struct drm_device *dev,
        if (INTEL_INFO(dev)->gen < 4)
                PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
        PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
-       PIPE_CONF_CHECK_I(pch_pfit.pos);
-       PIPE_CONF_CHECK_I(pch_pfit.size);
+       PIPE_CONF_CHECK_I(pch_pfit.enabled);
+       if (current_config->pch_pfit.enabled) {
+               PIPE_CONF_CHECK_I(pch_pfit.pos);
+               PIPE_CONF_CHECK_I(pch_pfit.size);
+       }
 
        PIPE_CONF_CHECK_I(ips_enabled);
 
index 2151d13772b8248cf1f6d6d71c2720fa074fe435..79c14e298ba657d32f0a57493ec9500b5216ba2d 100644 (file)
@@ -588,7 +588,18 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
                        DRM_DEBUG_KMS("aux_ch native nack\n");
                        return -EREMOTEIO;
                case AUX_NATIVE_REPLY_DEFER:
-                       udelay(100);
+                       /*
+                        * For now, just give more slack to branch devices. We
+                        * could check the DPCD for I2C bit rate capabilities,
+                        * and if available, adjust the interval. We could also
+                        * be more careful with DP-to-Legacy adapters where a
+                        * long legacy cable may force very low I2C bit rates.
+                        */
+                       if (intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+                           DP_DWN_STRM_PORT_PRESENT)
+                               usleep_range(500, 600);
+                       else
+                               usleep_range(300, 400);
                        continue;
                default:
                        DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
index a47799e832c6e61f5ff02afbf35ebe2cde73a4ed..28cae80495e2b1e9d1f4fe0d089bbd63fd7a11b7 100644 (file)
@@ -280,6 +280,7 @@ struct intel_crtc_config {
        struct {
                u32 pos;
                u32 size;
+               bool enabled;
        } pch_pfit;
 
        /* FDI configuration, only valid if has_pch_encoder is set. */
index 406303b509c1c0afeed4be9379060e5417e212e0..7fa7df546c1ee6e36e119af34d6e79a9d7015e82 100644 (file)
@@ -263,6 +263,8 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
                C(vtotal);
                C(clock);
 #undef C
+
+               drm_mode_set_crtcinfo(adjusted_mode, 0);
        }
 
        if (intel_dvo->dev.dev_ops->mode_fixup)
index 42114ecbae0e3c4c8dc51b7b02a1f658de857873..293564a2896a19038cc87ecba7648bce511388d2 100644 (file)
@@ -112,6 +112,7 @@ intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
 done:
        pipe_config->pch_pfit.pos = (x << 16) | y;
        pipe_config->pch_pfit.size = (width << 16) | height;
+       pipe_config->pch_pfit.enabled = pipe_config->pch_pfit.size != 0;
 }
 
 static void
index 0c115cc4899ffbe00de6ca305d5cd32bd2590405..dd176b7296c1c44904a163cfe82960f6e196249d 100644 (file)
@@ -2096,16 +2096,16 @@ static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
                                    struct drm_crtc *crtc)
 {
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       uint32_t pixel_rate, pfit_size;
+       uint32_t pixel_rate;
 
        pixel_rate = intel_crtc->config.adjusted_mode.clock;
 
        /* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
         * adjust the pixel_rate here. */
 
-       pfit_size = intel_crtc->config.pch_pfit.size;
-       if (pfit_size) {
+       if (intel_crtc->config.pch_pfit.enabled) {
                uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
+               uint32_t pfit_size = intel_crtc->config.pch_pfit.size;
 
                pipe_w = intel_crtc->config.requested_mode.hdisplay;
                pipe_h = intel_crtc->config.requested_mode.vdisplay;
index 85037b9d4934d406306634c486ed93cc92b771f3..49482fd5b76c6cad80298d2d2f67c0050c73a93d 100644 (file)
@@ -788,6 +788,8 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
        uint16_t h_sync_offset, v_sync_offset;
        int mode_clock;
 
+       memset(dtd, 0, sizeof(*dtd));
+
        width = mode->hdisplay;
        height = mode->vdisplay;
 
@@ -830,44 +832,51 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
        if (mode->flags & DRM_MODE_FLAG_PVSYNC)
                dtd->part2.dtd_flags |= DTD_FLAG_VSYNC_POSITIVE;
 
-       dtd->part2.sdvo_flags = 0;
        dtd->part2.v_sync_off_high = v_sync_offset & 0xc0;
-       dtd->part2.reserved = 0;
 }
 
-static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
+static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode *pmode,
                                         const struct intel_sdvo_dtd *dtd)
 {
-       mode->hdisplay = dtd->part1.h_active;
-       mode->hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8;
-       mode->hsync_start = mode->hdisplay + dtd->part2.h_sync_off;
-       mode->hsync_start += (dtd->part2.sync_off_width_high & 0xc0) << 2;
-       mode->hsync_end = mode->hsync_start + dtd->part2.h_sync_width;
-       mode->hsync_end += (dtd->part2.sync_off_width_high & 0x30) << 4;
-       mode->htotal = mode->hdisplay + dtd->part1.h_blank;
-       mode->htotal += (dtd->part1.h_high & 0xf) << 8;
-
-       mode->vdisplay = dtd->part1.v_active;
-       mode->vdisplay += ((dtd->part1.v_high >> 4) & 0x0f) << 8;
-       mode->vsync_start = mode->vdisplay;
-       mode->vsync_start += (dtd->part2.v_sync_off_width >> 4) & 0xf;
-       mode->vsync_start += (dtd->part2.sync_off_width_high & 0x0c) << 2;
-       mode->vsync_start += dtd->part2.v_sync_off_high & 0xc0;
-       mode->vsync_end = mode->vsync_start +
+       struct drm_display_mode mode = {};
+
+       mode.hdisplay = dtd->part1.h_active;
+       mode.hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8;
+       mode.hsync_start = mode.hdisplay + dtd->part2.h_sync_off;
+       mode.hsync_start += (dtd->part2.sync_off_width_high & 0xc0) << 2;
+       mode.hsync_end = mode.hsync_start + dtd->part2.h_sync_width;
+       mode.hsync_end += (dtd->part2.sync_off_width_high & 0x30) << 4;
+       mode.htotal = mode.hdisplay + dtd->part1.h_blank;
+       mode.htotal += (dtd->part1.h_high & 0xf) << 8;
+
+       mode.vdisplay = dtd->part1.v_active;
+       mode.vdisplay += ((dtd->part1.v_high >> 4) & 0x0f) << 8;
+       mode.vsync_start = mode.vdisplay;
+       mode.vsync_start += (dtd->part2.v_sync_off_width >> 4) & 0xf;
+       mode.vsync_start += (dtd->part2.sync_off_width_high & 0x0c) << 2;
+       mode.vsync_start += dtd->part2.v_sync_off_high & 0xc0;
+       mode.vsync_end = mode.vsync_start +
                (dtd->part2.v_sync_off_width & 0xf);
-       mode->vsync_end += (dtd->part2.sync_off_width_high & 0x3) << 4;
-       mode->vtotal = mode->vdisplay + dtd->part1.v_blank;
-       mode->vtotal += (dtd->part1.v_high & 0xf) << 8;
+       mode.vsync_end += (dtd->part2.sync_off_width_high & 0x3) << 4;
+       mode.vtotal = mode.vdisplay + dtd->part1.v_blank;
+       mode.vtotal += (dtd->part1.v_high & 0xf) << 8;
 
-       mode->clock = dtd->part1.clock * 10;
+       mode.clock = dtd->part1.clock * 10;
 
-       mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
        if (dtd->part2.dtd_flags & DTD_FLAG_INTERLACE)
-               mode->flags |= DRM_MODE_FLAG_INTERLACE;
+               mode.flags |= DRM_MODE_FLAG_INTERLACE;
        if (dtd->part2.dtd_flags & DTD_FLAG_HSYNC_POSITIVE)
-               mode->flags |= DRM_MODE_FLAG_PHSYNC;
+               mode.flags |= DRM_MODE_FLAG_PHSYNC;
+       else
+               mode.flags |= DRM_MODE_FLAG_NHSYNC;
        if (dtd->part2.dtd_flags & DTD_FLAG_VSYNC_POSITIVE)
-               mode->flags |= DRM_MODE_FLAG_PVSYNC;
+               mode.flags |= DRM_MODE_FLAG_PVSYNC;
+       else
+               mode.flags |= DRM_MODE_FLAG_NVSYNC;
+
+       drm_mode_set_crtcinfo(&mode, 0);
+
+       drm_mode_copy(pmode, &mode);
 }
 
 static bool intel_sdvo_check_supp_encode(struct intel_sdvo *intel_sdvo)
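The shape of that rewrite, reduced to a standalone sketch with a made-up struct mode (memcpy plays the role of drm_mode_copy here): decode into a zero-initialised local and overwrite the caller's structure in one go, so stale fields such as old sync flags cannot leak through:

  /* Sketch with an invented struct; not the drm_display_mode conversion itself. */
  #include <stdio.h>
  #include <string.h>

  struct mode { int hdisplay, vdisplay; unsigned int flags; };

  static void decode_mode(struct mode *out)
  {
          struct mode m = { 0 };          /* every field starts from a known state */

          m.hdisplay = 1920;
          m.vdisplay = 1080;
          m.flags = 0x1;                  /* only the flags actually decoded */

          memcpy(out, &m, sizeof(m));     /* overwrite the caller's struct wholesale */
  }

  int main(void)
  {
          struct mode m = { .flags = 0xdeadbeef };        /* stale caller state */

          decode_mode(&m);
          printf("%dx%d flags=0x%x\n", m.hdisplay, m.vdisplay, m.flags);
          return 0;
  }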
index f2c6d7909ae2d66ffa73ebe77d7c3b06d46403f7..dd6f84bf6c220e94c7f50e1d38ca7fea9f11cecb 100644 (file)
@@ -916,6 +916,14 @@ intel_tv_compute_config(struct intel_encoder *encoder,
        DRM_DEBUG_KMS("forcing bpc to 8 for TV\n");
        pipe_config->pipe_bpp = 8*3;
 
+       /* TV has its own notion of sync and other mode flags, so clear them. */
+       pipe_config->adjusted_mode.flags = 0;
+
+       /*
+        * FIXME: We don't check whether the input mode is actually what we want
+        * or whether userspace is doing something stupid.
+        */
+
        return true;
 }
 
index a60584763b61dde085f39c8058e4cb6900e2bace..a0b9d8a95b16c17ad6b11ae1b6da662e80484b04 100644 (file)
@@ -124,6 +124,8 @@ void adreno_recover(struct msm_gpu *gpu)
 
        /* reset completed fence seqno, just discard anything pending: */
        adreno_gpu->memptrs->fence = gpu->submitted_fence;
+       adreno_gpu->memptrs->rptr  = 0;
+       adreno_gpu->memptrs->wptr  = 0;
 
        gpu->funcs->pm_resume(gpu);
        ret = gpu->funcs->hw_init(gpu);
@@ -229,7 +231,7 @@ void adreno_idle(struct msm_gpu *gpu)
                        return;
        } while(time_before(jiffies, t));
 
-       DRM_ERROR("timeout waiting for %s to drain ringbuffer!\n", gpu->name);
+       DRM_ERROR("%s: timeout waiting to drain ringbuffer!\n", gpu->name);
 
        /* TODO maybe we need to reset GPU here to recover from hang? */
 }
@@ -256,11 +258,17 @@ void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords)
 {
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        uint32_t freedwords;
+       unsigned long t = jiffies + ADRENO_IDLE_TIMEOUT;
        do {
                uint32_t size = gpu->rb->size / 4;
                uint32_t wptr = get_wptr(gpu->rb);
                uint32_t rptr = adreno_gpu->memptrs->rptr;
                freedwords = (rptr + (size - 1) - wptr) % size;
+
+               if (time_after(jiffies, t)) {
+                       DRM_ERROR("%s: timeout waiting for ringbuffer space\n", gpu->name);
+                       break;
+               }
        } while(freedwords < ndwords);
 }
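The guard added to adreno_wait_ring, shown in isolation as a user-space sketch (wait_for, now_sec and never_done are invented, with clock_gettime standing in for jiffies): a busy-wait that could previously spin forever now carries a deadline and gives up instead:

  /* Sketch only; clock_gettime() stands in for jiffies/time_after(). */
  #include <stdbool.h>
  #include <stdio.h>
  #include <time.h>

  static double now_sec(void)
  {
          struct timespec ts;

          clock_gettime(CLOCK_MONOTONIC, &ts);
          return ts.tv_sec + ts.tv_nsec / 1e9;
  }

  static bool wait_for(bool (*done)(void), double timeout_sec)
  {
          double deadline = now_sec() + timeout_sec;

          while (!done()) {
                  if (now_sec() > deadline)
                          return false;   /* bail out instead of spinning forever */
          }
          return true;
  }

  static bool never_done(void) { return false; }

  int main(void)
  {
          printf("wait %s\n", wait_for(never_done, 0.01) ? "ok" : "timed out");
          return 0;
  }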
 
index 5db5bbaedae21d64b9764fc8d009dfa86b862543..bc7fd11ad8be4e7d4785d0f696d128220f25b7ca 100644 (file)
@@ -19,8 +19,6 @@
 #include "msm_drv.h"
 #include "mdp4_kms.h"
 
-#include <mach/iommu.h>
-
 static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev);
 
 static int mdp4_hw_init(struct msm_kms *kms)
index 864c9773636baf6aa7173c6b16a5f81841a2b922..b3a2f16290417cb055a8d7d89dc8f3f876b3a090 100644 (file)
@@ -18,8 +18,6 @@
 #include "msm_drv.h"
 #include "msm_gpu.h"
 
-#include <mach/iommu.h>
-
 static void msm_fb_output_poll_changed(struct drm_device *dev)
 {
        struct msm_drm_private *priv = dev->dev_private;
@@ -62,6 +60,8 @@ int msm_iommu_attach(struct drm_device *dev, struct iommu_domain *iommu,
        int i, ret;
 
        for (i = 0; i < cnt; i++) {
+               /* TODO maybe some day msm iommu won't require this hack: */
+               struct device *msm_iommu_get_ctx(const char *ctx_name);
                struct device *ctx = msm_iommu_get_ctx(names[i]);
                if (!ctx)
                        continue;
@@ -199,7 +199,7 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
                 * imx drm driver on iMX5
                 */
                dev_err(dev->dev, "failed to load kms\n");
-               ret = PTR_ERR(priv->kms);
+               ret = PTR_ERR(kms);
                goto fail;
        }
 
@@ -499,25 +499,41 @@ int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
                struct timespec *timeout)
 {
        struct msm_drm_private *priv = dev->dev_private;
-       unsigned long timeout_jiffies = timespec_to_jiffies(timeout);
-       unsigned long start_jiffies = jiffies;
-       unsigned long remaining_jiffies;
        int ret;
 
-       if (time_after(start_jiffies, timeout_jiffies))
-               remaining_jiffies = 0;
-       else
-               remaining_jiffies = timeout_jiffies - start_jiffies;
-
-       ret = wait_event_interruptible_timeout(priv->fence_event,
-                       priv->completed_fence >= fence,
-                       remaining_jiffies);
-       if (ret == 0) {
-               DBG("timeout waiting for fence: %u (completed: %u)",
-                               fence, priv->completed_fence);
-               ret = -ETIMEDOUT;
-       } else if (ret != -ERESTARTSYS) {
-               ret = 0;
+       if (!priv->gpu)
+               return 0;
+
+       if (fence > priv->gpu->submitted_fence) {
+               DRM_ERROR("waiting on invalid fence: %u (of %u)\n",
+                               fence, priv->gpu->submitted_fence);
+               return -EINVAL;
+       }
+
+       if (!timeout) {
+               /* no-wait: */
+               ret = fence_completed(dev, fence) ? 0 : -EBUSY;
+       } else {
+               unsigned long timeout_jiffies = timespec_to_jiffies(timeout);
+               unsigned long start_jiffies = jiffies;
+               unsigned long remaining_jiffies;
+
+               if (time_after(start_jiffies, timeout_jiffies))
+                       remaining_jiffies = 0;
+               else
+                       remaining_jiffies = timeout_jiffies - start_jiffies;
+
+               ret = wait_event_interruptible_timeout(priv->fence_event,
+                               fence_completed(dev, fence),
+                               remaining_jiffies);
+
+               if (ret == 0) {
+                       DBG("timeout waiting for fence: %u (completed: %u)",
+                                       fence, priv->completed_fence);
+                       ret = -ETIMEDOUT;
+               } else if (ret != -ERESTARTSYS) {
+                       ret = 0;
+               }
        }
 
        return ret;
@@ -681,7 +697,7 @@ static struct drm_driver msm_driver = {
        .gem_vm_ops         = &vm_ops,
        .dumb_create        = msm_gem_dumb_create,
        .dumb_map_offset    = msm_gem_dumb_map_offset,
-       .dumb_destroy       = msm_gem_dumb_destroy,
+       .dumb_destroy       = drm_gem_dumb_destroy,
 #ifdef CONFIG_DEBUG_FS
        .debugfs_init       = msm_debugfs_init,
        .debugfs_cleanup    = msm_debugfs_cleanup,
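A compact sketch of the wait semantics introduced in msm_wait_fence_interruptable above (wait_fence and fence_state are invented and nothing actually sleeps here): a NULL timeout means a non-blocking poll, a fence number beyond anything submitted is rejected up front, and only an in-range fence with a real timeout would block:

  /* Sketch only; names are invented and no real sleeping is done. */
  #include <errno.h>
  #include <stdint.h>
  #include <stdio.h>

  struct fence_state { uint32_t submitted, completed; };

  static int wait_fence(const struct fence_state *st, uint32_t fence,
                        const int *timeout_ms)
  {
          if (fence > st->submitted)
                  return -EINVAL;         /* never submitted: caller bug */
          if (!timeout_ms)                /* no-wait: just poll */
                  return st->completed >= fence ? 0 : -EBUSY;
          /* a real implementation would sleep here for up to *timeout_ms */
          return st->completed >= fence ? 0 : -ETIMEDOUT;
  }

  int main(void)
  {
          struct fence_state st = { .submitted = 10, .completed = 8 };
          int t = 100;

          printf("%d %d %d\n",
                 wait_fence(&st, 12, &t),         /* -EINVAL */
                 wait_fence(&st, 9, NULL),        /* -EBUSY */
                 wait_fence(&st, 8, &t));         /* 0 */
          return 0;
  }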
index 80d75094bf0afd9bb24cb34971cd5622259717ca..df8f1d084bc1d76d1ee8dc5db948e7ebf9f87771 100644 (file)
@@ -153,7 +153,7 @@ void *msm_gem_vaddr(struct drm_gem_object *obj);
 int msm_gem_queue_inactive_work(struct drm_gem_object *obj,
                struct work_struct *work);
 void msm_gem_move_to_active(struct drm_gem_object *obj,
-               struct msm_gpu *gpu, uint32_t fence);
+               struct msm_gpu *gpu, bool write, uint32_t fence);
 void msm_gem_move_to_inactive(struct drm_gem_object *obj);
 int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
                struct timespec *timeout);
@@ -191,6 +191,12 @@ u32 msm_readl(const void __iomem *addr);
 #define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
 #define VERB(fmt, ...) if (0) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
 
+static inline bool fence_completed(struct drm_device *dev, uint32_t fence)
+{
+       struct msm_drm_private *priv = dev->dev_private;
+       return priv->completed_fence >= fence;
+}
+
 static inline int align_pitch(int width, int bpp)
 {
        int bytespp = (bpp + 7) / 8;
index 6b5a6c8c7658c5b91e905ac013779d022d299cfe..2bae46c66a30dd4c3eac623c07dbf55c70d32cb8 100644 (file)
@@ -40,9 +40,9 @@ static struct page **get_pages(struct drm_gem_object *obj)
                }
 
                msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
-               if (!msm_obj->sgt) {
+               if (IS_ERR(msm_obj->sgt)) {
                        dev_err(dev->dev, "failed to allocate sgt\n");
-                       return ERR_PTR(-ENOMEM);
+                       return ERR_CAST(msm_obj->sgt);
                }
 
                msm_obj->pages = p;
@@ -159,7 +159,6 @@ out_unlock:
 out:
        switch (ret) {
        case -EAGAIN:
-               set_need_resched();
        case 0:
        case -ERESTARTSYS:
        case -EINTR:
@@ -320,13 +319,6 @@ int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
                        MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
 }
 
-int msm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
-               uint32_t handle)
-{
-       /* No special work needed, drop the reference and see what falls out */
-       return drm_gem_handle_delete(file, handle);
-}
-
 int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
                uint32_t handle, uint64_t *offset)
 {
@@ -393,11 +385,14 @@ int msm_gem_queue_inactive_work(struct drm_gem_object *obj,
 }
 
 void msm_gem_move_to_active(struct drm_gem_object *obj,
-               struct msm_gpu *gpu, uint32_t fence)
+               struct msm_gpu *gpu, bool write, uint32_t fence)
 {
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        msm_obj->gpu = gpu;
-       msm_obj->fence = fence;
+       if (write)
+               msm_obj->write_fence = fence;
+       else
+               msm_obj->read_fence = fence;
        list_del_init(&msm_obj->mm_list);
        list_add_tail(&msm_obj->mm_list, &gpu->active_list);
 }
@@ -411,7 +406,8 @@ void msm_gem_move_to_inactive(struct drm_gem_object *obj)
        WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
        msm_obj->gpu = NULL;
-       msm_obj->fence = 0;
+       msm_obj->read_fence = 0;
+       msm_obj->write_fence = 0;
        list_del_init(&msm_obj->mm_list);
        list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
 
@@ -433,8 +429,18 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        int ret = 0;
 
-       if (is_active(msm_obj) && !(op & MSM_PREP_NOSYNC))
-               ret = msm_wait_fence_interruptable(dev, msm_obj->fence, timeout);
+       if (is_active(msm_obj)) {
+               uint32_t fence = 0;
+
+               if (op & MSM_PREP_READ)
+                       fence = msm_obj->write_fence;
+               if (op & MSM_PREP_WRITE)
+                       fence = max(fence, msm_obj->read_fence);
+               if (op & MSM_PREP_NOSYNC)
+                       timeout = NULL;
+
+               ret = msm_wait_fence_interruptable(dev, fence, timeout);
+       }
 
        /* TODO cache maintenance */
 
@@ -455,9 +461,10 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
        uint64_t off = drm_vma_node_start(&obj->vma_node);
 
        WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-       seq_printf(m, "%08x: %c(%d) %2d (%2d) %08llx %p %d\n",
+       seq_printf(m, "%08x: %c(r=%u,w=%u) %2d (%2d) %08llx %p %d\n",
                        msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
-                       msm_obj->fence, obj->name, obj->refcount.refcount.counter,
+                       msm_obj->read_fence, msm_obj->write_fence,
+                       obj->name, obj->refcount.refcount.counter,
                        off, msm_obj->vaddr, obj->size);
 }
 
index d746f13d283c8cdaf7d885b2cd52e254a98f6061..0676f32e2c6ab917fe9980380c82137a1ad8f7ce 100644 (file)
@@ -36,7 +36,7 @@ struct msm_gem_object {
         */
        struct list_head mm_list;
        struct msm_gpu *gpu;     /* non-null if active */
-       uint32_t fence;
+       uint32_t read_fence, write_fence;
 
        /* Transiently in the process of submit ioctl, objects associated
         * with the submit are on submit->bo_list.. this only lasts for
index 3e1ef3a00f60cc929555768c1cb963b0b265e251..5281d4bc37f750e2162e4bd1f17859e4921c45cc 100644 (file)
@@ -78,7 +78,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
                }
 
                if (submit_bo.flags & BO_INVALID_FLAGS) {
-                       DBG("invalid flags: %x", submit_bo.flags);
+                       DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
                        ret = -EINVAL;
                        goto out_unlock;
                }
@@ -92,7 +92,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
                 */
                obj = idr_find(&file->object_idr, submit_bo.handle);
                if (!obj) {
-                       DBG("invalid handle %u at index %u", submit_bo.handle, i);
+                       DRM_ERROR("invalid handle %u at index %u\n", submit_bo.handle, i);
                        ret = -EINVAL;
                        goto out_unlock;
                }
@@ -100,7 +100,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
                msm_obj = to_msm_bo(obj);
 
                if (!list_empty(&msm_obj->submit_entry)) {
-                       DBG("handle %u at index %u already on submit list",
+                       DRM_ERROR("handle %u at index %u already on submit list\n",
                                        submit_bo.handle, i);
                        ret = -EINVAL;
                        goto out_unlock;
@@ -216,8 +216,9 @@ static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
                struct msm_gem_object **obj, uint32_t *iova, bool *valid)
 {
        if (idx >= submit->nr_bos) {
-               DBG("invalid buffer index: %u (out of %u)", idx, submit->nr_bos);
-               return EINVAL;
+               DRM_ERROR("invalid buffer index: %u (out of %u)\n",
+                               idx, submit->nr_bos);
+               return -EINVAL;
        }
 
        if (obj)
@@ -239,7 +240,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
        int ret;
 
        if (offset % 4) {
-               DBG("non-aligned cmdstream buffer: %u", offset);
+               DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset);
                return -EINVAL;
        }
 
@@ -266,7 +267,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
                        return -EFAULT;
 
                if (submit_reloc.submit_offset % 4) {
-                       DBG("non-aligned reloc offset: %u",
+                       DRM_ERROR("non-aligned reloc offset: %u\n",
                                        submit_reloc.submit_offset);
                        return -EINVAL;
                }
@@ -276,7 +277,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
 
                if ((off >= (obj->base.size / 4)) ||
                                (off < last_offset)) {
-                       DBG("invalid offset %u at reloc %u", off, i);
+                       DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
                        return -EINVAL;
                }
 
@@ -374,14 +375,15 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
                        goto out;
 
                if (submit_cmd.size % 4) {
-                       DBG("non-aligned cmdstream buffer size: %u",
+                       DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
                                        submit_cmd.size);
                        ret = -EINVAL;
                        goto out;
                }
 
-               if (submit_cmd.size >= msm_obj->base.size) {
-                       DBG("invalid cmdstream size: %u", submit_cmd.size);
+               if ((submit_cmd.size + submit_cmd.submit_offset) >=
+                               msm_obj->base.size) {
+                       DRM_ERROR("invalid cmdstream size: %u\n", submit_cmd.size);
                        ret = -EINVAL;
                        goto out;
                }
index e1e1ec9321ffe5d8db74e53a5d52b8a4122a51d9..3bab937965d1f596405864676464fe6370bf87bd 100644 (file)
 static void bs_init(struct msm_gpu *gpu, struct platform_device *pdev)
 {
        struct drm_device *dev = gpu->dev;
-       struct kgsl_device_platform_data *pdata = pdev->dev.platform_data;
+       struct kgsl_device_platform_data *pdata;
 
        if (!pdev) {
                dev_err(dev->dev, "could not find dtv pdata\n");
                return;
        }
 
+       pdata = pdev->dev.platform_data;
        if (pdata->bus_scale_table) {
                gpu->bsc = msm_bus_scale_register_client(pdata->bus_scale_table);
                DBG("bus scale client: %08x", gpu->bsc);
@@ -230,6 +231,8 @@ static void hangcheck_timer_reset(struct msm_gpu *gpu)
 static void hangcheck_handler(unsigned long data)
 {
        struct msm_gpu *gpu = (struct msm_gpu *)data;
+       struct drm_device *dev = gpu->dev;
+       struct msm_drm_private *priv = dev->dev_private;
        uint32_t fence = gpu->funcs->last_fence(gpu);
 
        if (fence != gpu->hangcheck_fence) {
@@ -237,14 +240,22 @@ static void hangcheck_handler(unsigned long data)
                gpu->hangcheck_fence = fence;
        } else if (fence < gpu->submitted_fence) {
                /* no progress and not done.. hung! */
-               struct msm_drm_private *priv = gpu->dev->dev_private;
                gpu->hangcheck_fence = fence;
+               dev_err(dev->dev, "%s: hangcheck detected gpu lockup!\n",
+                               gpu->name);
+               dev_err(dev->dev, "%s:     completed fence: %u\n",
+                               gpu->name, fence);
+               dev_err(dev->dev, "%s:     submitted fence: %u\n",
+                               gpu->name, gpu->submitted_fence);
                queue_work(priv->wq, &gpu->recover_work);
        }
 
        /* if still more pending work, reset the hangcheck timer: */
        if (gpu->submitted_fence > gpu->hangcheck_fence)
                hangcheck_timer_reset(gpu);
+
+       /* workaround for missing irq: */
+       queue_work(priv->wq, &gpu->retire_work);
 }
 
 /*
@@ -265,7 +276,8 @@ static void retire_worker(struct work_struct *work)
                obj = list_first_entry(&gpu->active_list,
                                struct msm_gem_object, mm_list);
 
-               if (obj->fence <= fence) {
+               if ((obj->read_fence <= fence) &&
+                               (obj->write_fence <= fence)) {
                        /* move to inactive: */
                        msm_gem_move_to_inactive(&obj->base);
                        msm_gem_put_iova(&obj->base, gpu->id);
@@ -321,7 +333,11 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
                                        submit->gpu->id, &iova);
                }
 
-               msm_gem_move_to_active(&msm_obj->base, gpu, submit->fence);
+               if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
+                       msm_gem_move_to_active(&msm_obj->base, gpu, false, submit->fence);
+
+               if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
+                       msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
        }
        hangcheck_timer_reset(gpu);
        mutex_unlock(&dev->struct_mutex);
index 05ff315e8e9e03f4d2ba242b4680894cea8632cd..b162e98a2953ee392029dba8c1eae7d0e3678e97 100644 (file)
@@ -1168,6 +1168,23 @@ static const struct radeon_blacklist_clocks btc_blacklist_clocks[] =
         { 25000, 30000, RADEON_SCLK_UP }
 };
 
+void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table,
+                                                    u32 *max_clock)
+{
+       u32 i, clock = 0;
+
+       if ((table == NULL) || (table->count == 0)) {
+               *max_clock = clock;
+               return;
+       }
+
+       for (i = 0; i < table->count; i++) {
+               if (clock < table->entries[i].clk)
+                       clock = table->entries[i].clk;
+       }
+       *max_clock = clock;
+}
+
 void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table,
                                        u32 clock, u16 max_voltage, u16 *voltage)
 {
@@ -2080,6 +2097,7 @@ static void btc_apply_state_adjust_rules(struct radeon_device *rdev,
        bool disable_mclk_switching;
        u32 mclk, sclk;
        u16 vddc, vddci;
+       u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
 
        if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
            btc_dpm_vblank_too_short(rdev))
@@ -2121,6 +2139,39 @@ static void btc_apply_state_adjust_rules(struct radeon_device *rdev,
                        ps->low.vddci = max_limits->vddci;
        }
 
+       /* limit clocks to max supported clocks based on voltage dependency tables */
+       btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
+                                                       &max_sclk_vddc);
+       btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+                                                       &max_mclk_vddci);
+       btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+                                                       &max_mclk_vddc);
+
+       if (max_sclk_vddc) {
+               if (ps->low.sclk > max_sclk_vddc)
+                       ps->low.sclk = max_sclk_vddc;
+               if (ps->medium.sclk > max_sclk_vddc)
+                       ps->medium.sclk = max_sclk_vddc;
+               if (ps->high.sclk > max_sclk_vddc)
+                       ps->high.sclk = max_sclk_vddc;
+       }
+       if (max_mclk_vddci) {
+               if (ps->low.mclk > max_mclk_vddci)
+                       ps->low.mclk = max_mclk_vddci;
+               if (ps->medium.mclk > max_mclk_vddci)
+                       ps->medium.mclk = max_mclk_vddci;
+               if (ps->high.mclk > max_mclk_vddci)
+                       ps->high.mclk = max_mclk_vddci;
+       }
+       if (max_mclk_vddc) {
+               if (ps->low.mclk > max_mclk_vddc)
+                       ps->low.mclk = max_mclk_vddc;
+               if (ps->medium.mclk > max_mclk_vddc)
+                       ps->medium.mclk = max_mclk_vddc;
+               if (ps->high.mclk > max_mclk_vddc)
+                       ps->high.mclk = max_mclk_vddc;
+       }
+
        /* XXX validate the min clocks required for display */
 
        if (disable_mclk_switching) {
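A user-space sketch of the clamp logic above, with a simplified table layout (dep_entry and max_supported_clock are invented names): take the highest clock any entry of the voltage dependency table supports and cap each state to it, treating an empty table, reported as 0, as no known limit:

  /* Sketch only; the table layout is simplified, not the radeon structs. */
  #include <stdint.h>
  #include <stdio.h>

  struct dep_entry { uint32_t clk; uint16_t v; };

  static uint32_t max_supported_clock(const struct dep_entry *e, int count)
  {
          uint32_t clock = 0;

          for (int i = 0; i < count; i++)
                  if (e[i].clk > clock)
                          clock = e[i].clk;
          return clock;                   /* 0 means "no table, don't clamp" */
  }

  int main(void)
  {
          struct dep_entry vddc_sclk[] = { { 30000, 900 }, { 60000, 1100 } };
          uint32_t levels[] = { 30000, 60000, 80000 };
          uint32_t max_sclk = max_supported_clock(vddc_sclk, 2);

          for (int i = 0; i < 3; i++) {
                  if (max_sclk && levels[i] > max_sclk)
                          levels[i] = max_sclk;
                  printf("level %d: %u\n", i, levels[i]);
          }
          return 0;
  }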
index 1a15e0e41950604ec8c27df9b11c6933c43d622d..3b6f12b7760ba48066f2144b73e0dc82c6521946 100644 (file)
@@ -46,6 +46,8 @@ void btc_adjust_clock_combinations(struct radeon_device *rdev,
                                   struct rv7xx_pl *pl);
 void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table,
                                        u32 clock, u16 max_voltage, u16 *voltage);
+void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table,
+                                                    u32 *max_clock);
 void btc_apply_voltage_delta_rules(struct radeon_device *rdev,
                                   u16 max_vddc, u16 max_vddci,
                                   u16 *vddc, u16 *vddci);
index 8996274430303ab3c2137b080381df612f3f181b..51e947a97edf945d02594dc0731b098ac033e1e0 100644 (file)
@@ -146,6 +146,8 @@ static const struct ci_pt_config_reg didt_config_ci[] =
 };
 
 extern u8 rv770_get_memory_module_index(struct radeon_device *rdev);
+extern void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table,
+                                                           u32 *max_clock);
 extern int ni_copy_and_switch_arb_sets(struct radeon_device *rdev,
                                       u32 arb_freq_src, u32 arb_freq_dest);
 extern u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock);
@@ -712,6 +714,7 @@ static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
        struct radeon_clock_and_voltage_limits *max_limits;
        bool disable_mclk_switching;
        u32 sclk, mclk;
+       u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
        int i;
 
        if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
@@ -739,6 +742,29 @@ static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
                }
        }
 
+       /* limit clocks to max supported clocks based on voltage dependency tables */
+       btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
+                                                       &max_sclk_vddc);
+       btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+                                                       &max_mclk_vddci);
+       btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+                                                       &max_mclk_vddc);
+
+       for (i = 0; i < ps->performance_level_count; i++) {
+               if (max_sclk_vddc) {
+                       if (ps->performance_levels[i].sclk > max_sclk_vddc)
+                               ps->performance_levels[i].sclk = max_sclk_vddc;
+               }
+               if (max_mclk_vddci) {
+                       if (ps->performance_levels[i].mclk > max_mclk_vddci)
+                               ps->performance_levels[i].mclk = max_mclk_vddci;
+               }
+               if (max_mclk_vddc) {
+                       if (ps->performance_levels[i].mclk > max_mclk_vddc)
+                               ps->performance_levels[i].mclk = max_mclk_vddc;
+               }
+       }
+
        /* XXX validate the min clocks required for display */
 
        if (disable_mclk_switching) {
index adbdb6503b0564b98867d67ac513b0d02f602054..d02fd1c045d567371a187c686e1af2b411f88aa1 100644 (file)
@@ -2845,10 +2845,8 @@ static void cik_gpu_init(struct radeon_device *rdev)
                rdev->config.cik.tile_config |= (3 << 0);
                break;
        }
-       if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
-               rdev->config.cik.tile_config |= 1 << 4;
-       else
-               rdev->config.cik.tile_config |= 0 << 4;
+       rdev->config.cik.tile_config |=
+               ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
        rdev->config.cik.tile_config |=
                ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
        rdev->config.cik.tile_config |=
@@ -4456,8 +4454,8 @@ static int cik_mc_init(struct radeon_device *rdev)
        rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
        rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
        /* size in MB on si */
-       rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
-       rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
+       rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+       rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
        rdev->mc.visible_vram_size = rdev->mc.aper_size;
        si_vram_gtt_location(rdev, &rdev->mc);
        radeon_update_bandwidth_info(rdev);
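The 1024ULL change is easy to demonstrate on its own: CONFIG_MEMSIZE reports megabytes in a 32-bit register, and multiplying it by plain 1024 * 1024 wraps in 32-bit arithmetic for boards with 4GB or more of VRAM, while promoting the expression to 64 bit does not (memsize_mb below is just a stand-in for the register readout):

  /* Sketch only; memsize_mb plays the role of the CONFIG_MEMSIZE readout. */
  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
          uint32_t memsize_mb = 8192;     /* an 8GB board */

          /* 32-bit multiply wraps (8192 * 1024 * 1024 == 2^33) ... */
          uint64_t wrong = (uint64_t)(memsize_mb * 1024 * 1024);
          /* ... while one ULL operand promotes the whole expression to 64 bit */
          uint64_t right = memsize_mb * 1024ULL * 1024ULL;

          printf("wrong: %llu bytes, right: %llu bytes\n",
                 (unsigned long long)wrong, (unsigned long long)right);
          return 0;
  }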
@@ -4735,12 +4733,13 @@ static void cik_vm_decode_fault(struct radeon_device *rdev,
        u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
        u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
        u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
-       char *block = (char *)&mc_client;
+       char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
+               (mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
 
-       printk("VM fault (0x%02x, vmid %d) at page %u, %s from %s (%d)\n",
+       printk("VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
               protections, vmid, addr,
               (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read",
-              block, mc_id);
+              block, mc_client, mc_id);
 }
 
 /**
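What the block[] change fixes, in isolation: the register packs four ASCII characters with the first one in the most significant byte, so the value is unpacked into a NUL-terminated buffer instead of pointing a char * at the u32, which on a little-endian machine would print the bytes in reverse order with no guaranteed terminator (the example value below is made up):

  /* Sketch only; the example value stands in for a real MEMORY_CLIENT_ID read. */
  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
          uint32_t mc_client = ('T' << 24) | ('C' << 16) | ('0' << 8) | ' ';
          char block[5] = {
                  mc_client >> 24, (mc_client >> 16) & 0xff,
                  (mc_client >> 8) & 0xff, mc_client & 0xff, 0
          };

          printf("client '%s' (0x%08x)\n", block, mc_client);
          return 0;
  }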
index 6c398a456d78b53668a73dbc82f369157c918647..f26339028154274609f31247580d03cbbed0d312 100644 (file)
@@ -787,6 +787,7 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev,
        bool disable_mclk_switching;
        u32 mclk, sclk;
        u16 vddc, vddci;
+       u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
        int i;
 
        if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
@@ -813,6 +814,29 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev,
                }
        }
 
+       /* limit clocks to max supported clocks based on voltage dependency tables */
+       btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
+                                                       &max_sclk_vddc);
+       btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+                                                       &max_mclk_vddci);
+       btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+                                                       &max_mclk_vddc);
+
+       for (i = 0; i < ps->performance_level_count; i++) {
+               if (max_sclk_vddc) {
+                       if (ps->performance_levels[i].sclk > max_sclk_vddc)
+                               ps->performance_levels[i].sclk = max_sclk_vddc;
+               }
+               if (max_mclk_vddci) {
+                       if (ps->performance_levels[i].mclk > max_mclk_vddci)
+                               ps->performance_levels[i].mclk = max_mclk_vddci;
+               }
+               if (max_mclk_vddc) {
+                       if (ps->performance_levels[i].mclk > max_mclk_vddc)
+                               ps->performance_levels[i].mclk = max_mclk_vddc;
+               }
+       }
+
        /* XXX validate the min clocks required for display */
 
        if (disable_mclk_switching) {
index 24175717307bc23ca336f3e998df192f3036fccd..d71333033b2ba0bc63f9e710a23f710d7b96b066 100644 (file)
@@ -2933,9 +2933,11 @@ static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data)
        seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
        seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
        seq_printf(m, "%u dwords in ring\n", count);
-       for (j = 0; j <= count; j++) {
-               i = (rdp + j) & ring->ptr_mask;
-               seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
+       if (ring->ready) {
+               for (j = 0; j <= count; j++) {
+                       i = (rdp + j) & ring->ptr_mask;
+                       seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
+               }
        }
        return 0;
 }
index e65f211a7be016eb9e3fe09e550ff87e8a12ae9e..5513d8f06252e13e11e27e9d21d5aecae775b5a8 100644 (file)
@@ -1084,7 +1084,7 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
                                rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
                                        le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
                                rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
-                                       le16_to_cpu(limits->entries[i].usVoltage);
+                                       le16_to_cpu(entry->usVoltage);
                                entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
                                        ((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
                        }
index f443010ce90b1ed7607565474b2ba0851e378789..b0fa6002af3e98da440cbebfa1ae183163e8b07d 100644 (file)
@@ -257,10 +257,7 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
         * number (coefficient of two integer numbers.  DCCG_AUDIO_DTOx_PHASE
         * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
         */
-       if (ASIC_IS_DCE3(rdev)) {
-               /* according to the reg specs, this should DCE3.2 only, but in
-                * practice it seems to cover DCE3.0 as well.
-                */
+       if (ASIC_IS_DCE32(rdev)) {
                if (dig->dig_encoder == 0) {
                        dto_cntl = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK;
                        dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio);
@@ -276,8 +273,21 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
                        WREG32(DCCG_AUDIO_DTO1_MODULE, dto_modulo);
                        WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
                }
+       } else if (ASIC_IS_DCE3(rdev)) {
+               /* according to the reg specs, this should be DCE3.2 only, but in
+                * practice it seems to cover DCE3.0/3.1 as well.
+                */
+               if (dig->dig_encoder == 0) {
+                       WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
+                       WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
+                       WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */
+               } else {
+                       WREG32(DCCG_AUDIO_DTO1_PHASE, base_rate * 100);
+                       WREG32(DCCG_AUDIO_DTO1_MODULE, clock * 100);
+                       WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
+               }
        } else {
-               /* according to the reg specs, this should be DCE2.0 and DCE3.0 */
+               /* according to the reg specs, this should be DCE2.0 and DCE3.0/3.1 */
                WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) |
                       AUDIO_DTO_MODULE(clock / 10));
        }
index 5003385a75129098e7519702f49bf86d67fbc8b2..8f7e04538fd624a5ecb1c302e8b9d5f2455f808a 100644 (file)
@@ -1004,6 +1004,8 @@ static struct radeon_asic rv6xx_asic = {
                .wait_for_vblank = &avivo_wait_for_vblank,
                .set_backlight_level = &atombios_set_backlight_level,
                .get_backlight_level = &atombios_get_backlight_level,
+               .hdmi_enable = &r600_hdmi_enable,
+               .hdmi_setmode = &r600_hdmi_setmode,
        },
        .copy = {
                .blit = &r600_copy_cpdma,
index 404e25d285ba816b1f34308ce79537b8a0d44a90..f79ee184ffd5849f4d0e0ec1b87b0d94d0f1f131 100644 (file)
@@ -1367,6 +1367,7 @@ bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev,
        int index = GetIndexIntoMasterTable(DATA, PPLL_SS_Info);
        uint16_t data_offset, size;
        struct _ATOM_SPREAD_SPECTRUM_INFO *ss_info;
+       struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT *ss_assign;
        uint8_t frev, crev;
        int i, num_indices;
 
@@ -1378,18 +1379,21 @@ bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev,
 
                num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
                        sizeof(ATOM_SPREAD_SPECTRUM_ASSIGNMENT);
-
+               ss_assign = (struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT*)
+                       ((u8 *)&ss_info->asSS_Info[0]);
                for (i = 0; i < num_indices; i++) {
-                       if (ss_info->asSS_Info[i].ucSS_Id == id) {
+                       if (ss_assign->ucSS_Id == id) {
                                ss->percentage =
-                                       le16_to_cpu(ss_info->asSS_Info[i].usSpreadSpectrumPercentage);
-                               ss->type = ss_info->asSS_Info[i].ucSpreadSpectrumType;
-                               ss->step = ss_info->asSS_Info[i].ucSS_Step;
-                               ss->delay = ss_info->asSS_Info[i].ucSS_Delay;
-                               ss->range = ss_info->asSS_Info[i].ucSS_Range;
-                               ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div;
+                                       le16_to_cpu(ss_assign->usSpreadSpectrumPercentage);
+                               ss->type = ss_assign->ucSpreadSpectrumType;
+                               ss->step = ss_assign->ucSS_Step;
+                               ss->delay = ss_assign->ucSS_Delay;
+                               ss->range = ss_assign->ucSS_Range;
+                               ss->refdiv = ss_assign->ucRecommendedRef_Div;
                                return true;
                        }
+                       ss_assign = (struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT*)
+                               ((u8 *)ss_assign + sizeof(struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT));
                }
        }
        return false;
@@ -1477,6 +1481,12 @@ union asic_ss_info {
        struct _ATOM_ASIC_INTERNAL_SS_INFO_V3 info_3;
 };
 
+union asic_ss_assignment {
+       struct _ATOM_ASIC_SS_ASSIGNMENT v1;
+       struct _ATOM_ASIC_SS_ASSIGNMENT_V2 v2;
+       struct _ATOM_ASIC_SS_ASSIGNMENT_V3 v3;
+};
+
 bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
                                      struct radeon_atom_ss *ss,
                                      int id, u32 clock)
@@ -1485,6 +1495,7 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
        int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
        uint16_t data_offset, size;
        union asic_ss_info *ss_info;
+       union asic_ss_assignment *ss_assign;
        uint8_t frev, crev;
        int i, num_indices;
 
@@ -1509,45 +1520,52 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
                        num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
                                sizeof(ATOM_ASIC_SS_ASSIGNMENT);
 
+                       ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info.asSpreadSpectrum[0]);
                        for (i = 0; i < num_indices; i++) {
-                               if ((ss_info->info.asSpreadSpectrum[i].ucClockIndication == id) &&
-                                   (clock <= le32_to_cpu(ss_info->info.asSpreadSpectrum[i].ulTargetClockRange))) {
+                               if ((ss_assign->v1.ucClockIndication == id) &&
+                                   (clock <= le32_to_cpu(ss_assign->v1.ulTargetClockRange))) {
                                        ss->percentage =
-                                               le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
-                                       ss->type = ss_info->info.asSpreadSpectrum[i].ucSpreadSpectrumMode;
-                                       ss->rate = le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadRateInKhz);
+                                               le16_to_cpu(ss_assign->v1.usSpreadSpectrumPercentage);
+                                       ss->type = ss_assign->v1.ucSpreadSpectrumMode;
+                                       ss->rate = le16_to_cpu(ss_assign->v1.usSpreadRateInKhz);
                                        return true;
                                }
+                               ss_assign = (union asic_ss_assignment *)
+                                       ((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT));
                        }
                        break;
                case 2:
                        num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
                                sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2);
+                       ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info_2.asSpreadSpectrum[0]);
                        for (i = 0; i < num_indices; i++) {
-                               if ((ss_info->info_2.asSpreadSpectrum[i].ucClockIndication == id) &&
-                                   (clock <= le32_to_cpu(ss_info->info_2.asSpreadSpectrum[i].ulTargetClockRange))) {
+                               if ((ss_assign->v2.ucClockIndication == id) &&
+                                   (clock <= le32_to_cpu(ss_assign->v2.ulTargetClockRange))) {
                                        ss->percentage =
-                                               le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
-                                       ss->type = ss_info->info_2.asSpreadSpectrum[i].ucSpreadSpectrumMode;
-                                       ss->rate = le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadRateIn10Hz);
+                                               le16_to_cpu(ss_assign->v2.usSpreadSpectrumPercentage);
+                                       ss->type = ss_assign->v2.ucSpreadSpectrumMode;
+                                       ss->rate = le16_to_cpu(ss_assign->v2.usSpreadRateIn10Hz);
                                        if ((crev == 2) &&
                                            ((id == ASIC_INTERNAL_ENGINE_SS) ||
                                             (id == ASIC_INTERNAL_MEMORY_SS)))
                                                ss->rate /= 100;
                                        return true;
                                }
+                               ss_assign = (union asic_ss_assignment *)
+                                       ((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2));
                        }
                        break;
                case 3:
                        num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
                                sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);
+                       ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info_3.asSpreadSpectrum[0]);
                        for (i = 0; i < num_indices; i++) {
-                               if ((ss_info->info_3.asSpreadSpectrum[i].ucClockIndication == id) &&
-                                   (clock <= le32_to_cpu(ss_info->info_3.asSpreadSpectrum[i].ulTargetClockRange))) {
+                               if ((ss_assign->v3.ucClockIndication == id) &&
+                                   (clock <= le32_to_cpu(ss_assign->v3.ulTargetClockRange))) {
                                        ss->percentage =
-                                               le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
-                                       ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode;
-                                       ss->rate = le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadRateIn10Hz);
+                                               le16_to_cpu(ss_assign->v3.usSpreadSpectrumPercentage);
+                                       ss->type = ss_assign->v3.ucSpreadSpectrumMode;
+                                       ss->rate = le16_to_cpu(ss_assign->v3.usSpreadRateIn10Hz);
                                        if ((id == ASIC_INTERNAL_ENGINE_SS) ||
                                            (id == ASIC_INTERNAL_MEMORY_SS))
                                                ss->rate /= 100;
@@ -1555,6 +1573,8 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
                                                radeon_atombios_get_igp_ss_overrides(rdev, ss, id);
                                        return true;
                                }
+                               ss_assign = (union asic_ss_assignment *)
+                                       ((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3));
                        }
                        break;
                default:
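
The hunks above stop indexing asSpreadSpectrum[] directly and instead walk the table through a single ss_assign pointer that is advanced by the size of the per-revision assignment structure, since the V1/V2/V3 entry layouts differ in size and cannot all be addressed through one array type. A minimal stand-alone sketch of that walking pattern (the structs and field names below are illustrative stand-ins, not the real ATOM definitions):

    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative stand-ins for the differently sized per-revision entries. */
    struct ss_v1 { uint8_t clock_id; uint16_t percentage; };
    struct ss_v2 { uint8_t clock_id; uint16_t percentage; uint32_t target_clock; };

    /* Walk 'count' entries of a versioned blob whose entry size depends on 'rev';
     * the caller casts the returned pointer to the matching revision struct. */
    static const void *find_ss_entry(const void *blob, int count, int rev, uint8_t id)
    {
            size_t step = (rev == 1) ? sizeof(struct ss_v1) : sizeof(struct ss_v2);
            const uint8_t *p = blob;
            int i;

            for (i = 0; i < count; i++, p += step) {
                    if (*p == id)   /* clock id is the first byte in every revision */
                            return p;
            }
            return NULL;
    }
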
index ac6ece61a47627931e9a3bcb68c3a34c636efe0a..66c222836631a4bc8dcd12b2f81f6e935218c566 100644 (file)
@@ -85,8 +85,9 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
                   VRAM, also put everything into VRAM on AGP cards to avoid
                   image corruption */
                if (p->ring == R600_RING_TYPE_UVD_INDEX &&
-                   (i == 0 || p->rdev->flags & RADEON_IS_AGP)) {
-                       /* TODO: is this still needed for NI+ ? */
+                   p->rdev->family < CHIP_PALM &&
+                   (i == 0 || drm_pci_device_is_agp(p->rdev->ddev))) {
+
                        p->relocs[i].lobj.domain =
                                RADEON_GEM_DOMAIN_VRAM;
 
index e29faa73b574c6b989a9c501d15032a218ae849b..841d0e09be3e9fb109b5afd45d012590a0ee6433 100644 (file)
@@ -1320,13 +1320,22 @@ int radeon_device_init(struct radeon_device *rdev,
                        return r;
        }
        if ((radeon_testing & 1)) {
-               radeon_test_moves(rdev);
+               if (rdev->accel_working)
+                       radeon_test_moves(rdev);
+               else
+                       DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
        }
        if ((radeon_testing & 2)) {
-               radeon_test_syncing(rdev);
+               if (rdev->accel_working)
+                       radeon_test_syncing(rdev);
+               else
+                       DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
        }
        if (radeon_benchmarking) {
-               radeon_benchmark(rdev, radeon_benchmarking);
+               if (rdev->accel_working)
+                       radeon_benchmark(rdev, radeon_benchmarking);
+               else
+                       DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
        }
        return 0;
 }
index 87e1d69e8fdb3b859af4ed3d809ea93db40f2c07..ac07ad1d4f8c903783acc26e3fc8f9f0e3449d44 100644 (file)
@@ -1002,7 +1002,7 @@ static void radeon_pm_resume_old(struct radeon_device *rdev)
 {
        /* set up the default clocks if the MC ucode is loaded */
        if ((rdev->family >= CHIP_BARTS) &&
-           (rdev->family <= CHIP_HAINAN) &&
+           (rdev->family <= CHIP_CAYMAN) &&
            rdev->mc_fw) {
                if (rdev->pm.default_vddc)
                        radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
@@ -1046,7 +1046,7 @@ static void radeon_pm_resume_dpm(struct radeon_device *rdev)
        if (ret) {
                DRM_ERROR("radeon: dpm resume failed\n");
                if ((rdev->family >= CHIP_BARTS) &&
-                   (rdev->family <= CHIP_HAINAN) &&
+                   (rdev->family <= CHIP_CAYMAN) &&
                    rdev->mc_fw) {
                        if (rdev->pm.default_vddc)
                                radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
@@ -1097,7 +1097,7 @@ static int radeon_pm_init_old(struct radeon_device *rdev)
                radeon_pm_init_profile(rdev);
                /* set up the default clocks if the MC ucode is loaded */
                if ((rdev->family >= CHIP_BARTS) &&
-                   (rdev->family <= CHIP_HAINAN) &&
+                   (rdev->family <= CHIP_CAYMAN) &&
                    rdev->mc_fw) {
                        if (rdev->pm.default_vddc)
                                radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
@@ -1183,7 +1183,7 @@ static int radeon_pm_init_dpm(struct radeon_device *rdev)
        if (ret) {
                rdev->pm.dpm_enabled = false;
                if ((rdev->family >= CHIP_BARTS) &&
-                   (rdev->family <= CHIP_HAINAN) &&
+                   (rdev->family <= CHIP_CAYMAN) &&
                    rdev->mc_fw) {
                        if (rdev->pm.default_vddc)
                                radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
index 46a25f037b843b70ebfb04e77ad55901111efd02..18254e1c3e718ee1c7c4cdd4321bf95b15581970 100644 (file)
@@ -839,9 +839,11 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
         * packet that is the root issue
         */
        i = (ring->rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
-       for (j = 0; j <= (count + 32); j++) {
-               seq_printf(m, "r[%5d]=0x%08x\n", i, ring->ring[i]);
-               i = (i + 1) & ring->ptr_mask;
+       if (ring->ready) {
+               for (j = 0; j <= (count + 32); j++) {
+                       seq_printf(m, "r[%5d]=0x%08x\n", i, ring->ring[i]);
+                       i = (i + 1) & ring->ptr_mask;
+               }
        }
        return 0;
 }
index 1a01bbff9bfa4f5c9ca1d354cd99abae981a36ce..a0f11856dddef7067871e630ed2bafcb9a2d4587 100644 (file)
@@ -476,8 +476,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
                return -EINVAL;
        }
 
-       /* TODO: is this still necessary on NI+ ? */
-       if ((cmd == 0 || cmd == 0x3) &&
+       if (p->rdev->family < CHIP_PALM && (cmd == 0 || cmd == 0x3) &&
            (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) {
                DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
                          start, end);
index cfe5d4d289159c832d71375e6eca8090adab7c3f..9ace28702c761a196089c7e88442e45b08a3ed16 100644 (file)
@@ -2910,6 +2910,7 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
        bool disable_sclk_switching = false;
        u32 mclk, sclk;
        u16 vddc, vddci;
+       u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
        int i;
 
        if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
@@ -2943,6 +2944,29 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
                }
        }
 
+       /* limit clocks to max supported clocks based on voltage dependency tables */
+       btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
+                                                       &max_sclk_vddc);
+       btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+                                                       &max_mclk_vddci);
+       btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+                                                       &max_mclk_vddc);
+
+       for (i = 0; i < ps->performance_level_count; i++) {
+               if (max_sclk_vddc) {
+                       if (ps->performance_levels[i].sclk > max_sclk_vddc)
+                               ps->performance_levels[i].sclk = max_sclk_vddc;
+               }
+               if (max_mclk_vddci) {
+                       if (ps->performance_levels[i].mclk > max_mclk_vddci)
+                               ps->performance_levels[i].mclk = max_mclk_vddci;
+               }
+               if (max_mclk_vddc) {
+                       if (ps->performance_levels[i].mclk > max_mclk_vddc)
+                               ps->performance_levels[i].mclk = max_mclk_vddc;
+               }
+       }
+
        /* XXX validate the min clocks required for display */
 
        if (disable_mclk_switching) {
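
The added block clamps each performance level's engine and memory clock to the highest clock the corresponding voltage dependency table can supply, with a limit of 0 (an empty table) leaving the level untouched. A hedged sketch of the same idea in isolation, using simplified stand-in types rather than the driver's dpm structures:

    #include <stdint.h>

    struct vc_entry { uint32_t clk; uint16_t v; };          /* clock/voltage pair */
    struct vc_table { const struct vc_entry *entries; uint32_t count; };

    /* Highest clock listed in a voltage/clock dependency table, 0 if it is empty. */
    static uint32_t max_clock_from_table(const struct vc_table *t)
    {
            uint32_t i, max = 0;

            for (i = 0; i < t->count; i++)
                    if (t->entries[i].clk > max)
                            max = t->entries[i].clk;
            return max;
    }

    /* Clamp a requested clock only when a non-empty table produced a limit. */
    static uint32_t clamp_clock(uint32_t clk, uint32_t limit)
    {
            return (limit && clk > limit) ? limit : clk;
    }
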
index 7266805d9786c6fe9bfd38501f7ab674f7d05a73..3100fa9cb52f4ff5f0b924af6d146f1e06ae6908 100644 (file)
@@ -212,8 +212,8 @@ int uvd_v1_0_start(struct radeon_device *rdev)
        /* enable VCPU clock */
        WREG32(UVD_VCPU_CNTL,  1 << 9);
 
-       /* enable UMC */
-       WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));
+       /* enable UMC and NC0 */
+       WREG32_P(UVD_LMI_CTRL2, 1 << 13, ~((1 << 8) | (1 << 13)));
 
        /* boot up the VCPU */
        WREG32(UVD_SOFT_RESET, 0);
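
The UMC/NC0 change is a masked read-modify-write: assuming the driver's WREG32_P(reg, val, mask) keeps the bits selected by mask and substitutes val for the rest, passing 1 << 13 with a mask of ~((1 << 8) | (1 << 13)) sets bit 13 and clears bit 8 in a single update while leaving every other bit alone. A plain-C sketch of that update pattern (the helper below is a stand-in, not the driver macro):

    #include <stdint.h>

    /* Keep the bits selected by 'mask', substitute 'val' for the rest. */
    static uint32_t masked_update(uint32_t old, uint32_t val, uint32_t mask)
    {
            return (old & mask) | (val & ~mask);
    }

    /* Example: set bit 13 and clear bit 8, preserving everything else:
     * new = masked_update(old, 1u << 13, ~((1u << 8) | (1u << 13)));
     */
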
index 8f4743ab5fb279ae416fbce5ff4e16e58db45eaf..936093e0271e3c7fffccce7fe0e688c4d6f3ad01 100644 (file)
@@ -195,7 +195,7 @@ int vmbus_connect(void)
 
        do {
                ret = vmbus_negotiate_version(msginfo, version);
-               if (ret)
+               if (ret == -ETIMEDOUT)
                        goto cleanup;
 
                if (vmbus_connection.conn_state == CONNECTED)
index 28b03325b8729ffdf2d3356538dc2515b5af5de2..09988b2896226552e4be5cd69d5f09ba3f842d11 100644 (file)
 /*
  * Pre win8 version numbers used in ws2008 and ws 2008 r2 (win7)
  */
+#define WS2008_SRV_MAJOR       1
+#define WS2008_SRV_MINOR       0
+#define WS2008_SRV_VERSION     (WS2008_SRV_MAJOR << 16 | WS2008_SRV_MINOR)
+
 #define WIN7_SRV_MAJOR   3
 #define WIN7_SRV_MINOR   0
-#define WIN7_SRV_MAJOR_MINOR     (WIN7_SRV_MAJOR << 16 | WIN7_SRV_MINOR)
+#define WIN7_SRV_VERSION     (WIN7_SRV_MAJOR << 16 | WIN7_SRV_MINOR)
 
 #define WIN8_SRV_MAJOR   4
 #define WIN8_SRV_MINOR   0
-#define WIN8_SRV_MAJOR_MINOR     (WIN8_SRV_MAJOR << 16 | WIN8_SRV_MINOR)
+#define WIN8_SRV_VERSION     (WIN8_SRV_MAJOR << 16 | WIN8_SRV_MINOR)
 
 /*
  * Global state maintained for transaction that is being processed.
@@ -587,6 +591,8 @@ void hv_kvp_onchannelcallback(void *context)
 
        struct icmsg_hdr *icmsghdrp;
        struct icmsg_negotiate *negop = NULL;
+       int util_fw_version;
+       int kvp_srv_version;
 
        if (kvp_transaction.active) {
                /*
@@ -606,17 +612,26 @@ void hv_kvp_onchannelcallback(void *context)
 
                if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
                        /*
-                        * We start with win8 version and if the host cannot
-                        * support that we use the previous version.
+                        * Based on the host, select the appropriate
+                        * framework and service versions we will
+                        * negotiate.
                         */
-                       if (vmbus_prep_negotiate_resp(icmsghdrp, negop,
-                                recv_buffer, UTIL_FW_MAJOR_MINOR,
-                                WIN8_SRV_MAJOR_MINOR))
-                               goto done;
-
+                       switch (vmbus_proto_version) {
+                       case (VERSION_WS2008):
+                               util_fw_version = UTIL_WS2K8_FW_VERSION;
+                               kvp_srv_version = WS2008_SRV_VERSION;
+                               break;
+                       case (VERSION_WIN7):
+                               util_fw_version = UTIL_FW_VERSION;
+                               kvp_srv_version = WIN7_SRV_VERSION;
+                               break;
+                       default:
+                               util_fw_version = UTIL_FW_VERSION;
+                               kvp_srv_version = WIN8_SRV_VERSION;
+                       }
                        vmbus_prep_negotiate_resp(icmsghdrp, negop,
-                                recv_buffer, UTIL_FW_MAJOR_MINOR,
-                                WIN7_SRV_MAJOR_MINOR);
+                                recv_buffer, util_fw_version,
+                                kvp_srv_version);
 
                } else {
                        kvp_msg = (struct hv_kvp_msg *)&recv_buffer[
@@ -649,7 +664,6 @@ void hv_kvp_onchannelcallback(void *context)
                        return;
 
                }
-done:
 
                icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
                        | ICMSGHDRFLAG_RESPONSE;
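
All of the *_VERSION macros introduced above pack a 16-bit major and a 16-bit minor into one 32-bit negotiation word as (major << 16) | minor; the host-dependent switch then only decides which packed framework and service values are handed to vmbus_prep_negotiate_resp(). A small self-contained illustration of packing and unpacking such a word (helper names are made up for the example):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t srv_version(uint16_t major, uint16_t minor)
    {
            return ((uint32_t)major << 16) | minor;
    }

    int main(void)
    {
            uint32_t v = srv_version(3, 0);     /* e.g. the WIN7 KVP service version */

            printf("packed 0x%08x -> major %u minor %u\n",
                   v, v >> 16, v & 0xffff);     /* packed 0x00030000 -> major 3 minor 0 */
            return 0;
    }
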
index e4572f3f2834fe917c5f4dbf097a30cbc8f3b383..0c354622437681d1b6dd157f718bb27cf8dbdbed 100644 (file)
@@ -26,7 +26,7 @@
 
 #define VSS_MAJOR  5
 #define VSS_MINOR  0
-#define VSS_MAJOR_MINOR    (VSS_MAJOR << 16 | VSS_MINOR)
+#define VSS_VERSION    (VSS_MAJOR << 16 | VSS_MINOR)
 
 
 
@@ -190,8 +190,8 @@ void hv_vss_onchannelcallback(void *context)
 
                if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
                        vmbus_prep_negotiate_resp(icmsghdrp, negop,
-                                recv_buffer, UTIL_FW_MAJOR_MINOR,
-                                VSS_MAJOR_MINOR);
+                                recv_buffer, UTIL_FW_VERSION,
+                                VSS_VERSION);
                } else {
                        vss_msg = (struct hv_vss_msg *)&recv_buffer[
                                sizeof(struct vmbuspipe_hdr) +
index cb82233541b196a53177007ebb4520a9e4112442..273e3ddb3a20b00c5d1ae03bab6368c5fcfbc26f 100644 (file)
 #include <linux/reboot.h>
 #include <linux/hyperv.h>
 
-#define SHUTDOWN_MAJOR 3
-#define SHUTDOWN_MINOR  0
-#define SHUTDOWN_MAJOR_MINOR   (SHUTDOWN_MAJOR << 16 | SHUTDOWN_MINOR)
 
-#define TIMESYNCH_MAJOR        3
-#define TIMESYNCH_MINOR 0
-#define TIMESYNCH_MAJOR_MINOR  (TIMESYNCH_MAJOR << 16 | TIMESYNCH_MINOR)
+#define SD_MAJOR       3
+#define SD_MINOR       0
+#define SD_VERSION     (SD_MAJOR << 16 | SD_MINOR)
 
-#define HEARTBEAT_MAJOR        3
-#define HEARTBEAT_MINOR 0
-#define HEARTBEAT_MAJOR_MINOR  (HEARTBEAT_MAJOR << 16 | HEARTBEAT_MINOR)
+#define SD_WS2008_MAJOR                1
+#define SD_WS2008_VERSION      (SD_WS2008_MAJOR << 16 | SD_MINOR)
+
+#define TS_MAJOR       3
+#define TS_MINOR       0
+#define TS_VERSION     (TS_MAJOR << 16 | TS_MINOR)
+
+#define TS_WS2008_MAJOR                1
+#define TS_WS2008_VERSION      (TS_WS2008_MAJOR << 16 | TS_MINOR)
+
+#define HB_MAJOR       3
+#define HB_MINOR 0
+#define HB_VERSION     (HB_MAJOR << 16 | HB_MINOR)
+
+#define HB_WS2008_MAJOR        1
+#define HB_WS2008_VERSION      (HB_WS2008_MAJOR << 16 | HB_MINOR)
+
+static int sd_srv_version;
+static int ts_srv_version;
+static int hb_srv_version;
+static int util_fw_version;
 
 static void shutdown_onchannelcallback(void *context);
 static struct hv_util_service util_shutdown = {
@@ -99,8 +114,8 @@ static void shutdown_onchannelcallback(void *context)
 
                if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
                        vmbus_prep_negotiate_resp(icmsghdrp, negop,
-                                       shut_txf_buf, UTIL_FW_MAJOR_MINOR,
-                                       SHUTDOWN_MAJOR_MINOR);
+                                       shut_txf_buf, util_fw_version,
+                                       sd_srv_version);
                } else {
                        shutdown_msg =
                                (struct shutdown_msg_data *)&shut_txf_buf[
@@ -216,6 +231,7 @@ static void timesync_onchannelcallback(void *context)
        struct icmsg_hdr *icmsghdrp;
        struct ictimesync_data *timedatap;
        u8 *time_txf_buf = util_timesynch.recv_buffer;
+       struct icmsg_negotiate *negop = NULL;
 
        vmbus_recvpacket(channel, time_txf_buf,
                         PAGE_SIZE, &recvlen, &requestid);
@@ -225,9 +241,10 @@ static void timesync_onchannelcallback(void *context)
                                sizeof(struct vmbuspipe_hdr)];
 
                if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
-                       vmbus_prep_negotiate_resp(icmsghdrp, NULL, time_txf_buf,
-                                               UTIL_FW_MAJOR_MINOR,
-                                               TIMESYNCH_MAJOR_MINOR);
+                       vmbus_prep_negotiate_resp(icmsghdrp, negop,
+                                               time_txf_buf,
+                                               util_fw_version,
+                                               ts_srv_version);
                } else {
                        timedatap = (struct ictimesync_data *)&time_txf_buf[
                                sizeof(struct vmbuspipe_hdr) +
@@ -257,6 +274,7 @@ static void heartbeat_onchannelcallback(void *context)
        struct icmsg_hdr *icmsghdrp;
        struct heartbeat_msg_data *heartbeat_msg;
        u8 *hbeat_txf_buf = util_heartbeat.recv_buffer;
+       struct icmsg_negotiate *negop = NULL;
 
        vmbus_recvpacket(channel, hbeat_txf_buf,
                         PAGE_SIZE, &recvlen, &requestid);
@@ -266,9 +284,9 @@ static void heartbeat_onchannelcallback(void *context)
                                sizeof(struct vmbuspipe_hdr)];
 
                if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
-                       vmbus_prep_negotiate_resp(icmsghdrp, NULL,
-                               hbeat_txf_buf, UTIL_FW_MAJOR_MINOR,
-                               HEARTBEAT_MAJOR_MINOR);
+                       vmbus_prep_negotiate_resp(icmsghdrp, negop,
+                               hbeat_txf_buf, util_fw_version,
+                               hb_srv_version);
                } else {
                        heartbeat_msg =
                                (struct heartbeat_msg_data *)&hbeat_txf_buf[
@@ -321,6 +339,25 @@ static int util_probe(struct hv_device *dev,
                goto error;
 
        hv_set_drvdata(dev, srv);
+       /*
+        * Based on the host, initialize the framework and
+        * service version numbers we will negotiate.
+        */
+       switch (vmbus_proto_version) {
+       case (VERSION_WS2008):
+               util_fw_version = UTIL_WS2K8_FW_VERSION;
+               sd_srv_version = SD_WS2008_VERSION;
+               ts_srv_version = TS_WS2008_VERSION;
+               hb_srv_version = HB_WS2008_VERSION;
+               break;
+
+       default:
+               util_fw_version = UTIL_FW_VERSION;
+               sd_srv_version = SD_VERSION;
+               ts_srv_version = TS_VERSION;
+               hb_srv_version = HB_VERSION;
+       }
+
        return 0;
 
 error:
index 62c2e32e25ef6823290380074d50829af30e5751..98814d12a6040e00a9a56d80686d9392c78b3765 100644 (file)
@@ -525,16 +525,25 @@ static int applesmc_init_smcreg_try(void)
 {
        struct applesmc_registers *s = &smcreg;
        bool left_light_sensor, right_light_sensor;
+       unsigned int count;
        u8 tmp[1];
        int ret;
 
        if (s->init_complete)
                return 0;
 
-       ret = read_register_count(&s->key_count);
+       ret = read_register_count(&count);
        if (ret)
                return ret;
 
+       if (s->cache && s->key_count != count) {
+               pr_warn("key count changed from %d to %d\n",
+                       s->key_count, count);
+               kfree(s->cache);
+               s->cache = NULL;
+       }
+       s->key_count = count;
+
        if (!s->cache)
                s->cache = kcalloc(s->key_count, sizeof(*s->cache), GFP_KERNEL);
        if (!s->cache)
index dbecf08399f86dd30baa7fa7b8964e2daf616bcb..5888feef1ac5a959b5f0471a3fb71b9a2b00bada 100644 (file)
@@ -98,6 +98,8 @@
 
 #define DW_IC_ERR_TX_ABRT      0x1
 
+#define DW_IC_TAR_10BITADDR_MASTER BIT(12)
+
 /*
  * status codes
  */
@@ -388,22 +390,34 @@ static int i2c_dw_wait_bus_not_busy(struct dw_i2c_dev *dev)
 static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
 {
        struct i2c_msg *msgs = dev->msgs;
-       u32 ic_con;
+       u32 ic_con, ic_tar = 0;
 
        /* Disable the adapter */
        __i2c_dw_enable(dev, false);
 
-       /* set the slave (target) address */
-       dw_writel(dev, msgs[dev->msg_write_idx].addr, DW_IC_TAR);
-
        /* if the slave address is ten bit address, enable 10BITADDR */
        ic_con = dw_readl(dev, DW_IC_CON);
-       if (msgs[dev->msg_write_idx].flags & I2C_M_TEN)
+       if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) {
                ic_con |= DW_IC_CON_10BITADDR_MASTER;
-       else
+               /*
+                * If I2C_DYNAMIC_TAR_UPDATE is set, the 10-bit addressing
+                * mode has to be enabled via bit 12 of the IC_TAR register.
+                * We always set it, as I2C_DYNAMIC_TAR_UPDATE cannot be
+                * detected from the registers.
+                */
+               ic_tar = DW_IC_TAR_10BITADDR_MASTER;
+       } else {
                ic_con &= ~DW_IC_CON_10BITADDR_MASTER;
+       }
+
        dw_writel(dev, ic_con, DW_IC_CON);
 
+       /*
+        * Set the slave (target) address and enable 10-bit addressing mode
+        * if applicable.
+        */
+       dw_writel(dev, msgs[dev->msg_write_idx].addr | ic_tar, DW_IC_TAR);
+
        /* Enable the adapter */
        __i2c_dw_enable(dev, true);
 
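
With this change the slave address write also carries the addressing mode: when the message uses a 10-bit address, DW_IC_TAR_10BITADDR_MASTER (bit 12) is ORed into the value written to DW_IC_TAR, because on controllers synthesized with I2C_DYNAMIC_TAR_UPDATE the 10-bit mode has to be enabled through that bit, the option cannot be detected from the registers, and so the driver sets it unconditionally alongside the IC_CON flag. A minimal sketch of composing that register value (the constant mirrors the one defined in the hunk; the helper is illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    #define DW_IC_TAR_10BITADDR_MASTER (1u << 12)

    /* Low bits carry the target address; bit 12 requests 10-bit addressing. */
    static uint32_t dw_ic_tar_value(uint16_t addr, bool ten_bit)
    {
            uint32_t tar = addr;

            if (ten_bit)
                    tar |= DW_IC_TAR_10BITADDR_MASTER;
            return tar;
    }
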
index 8ed79a086f858012b0f22b291834a5fb357d075c..1672effbcebb23894b99deac27bb84d595fa28d2 100644 (file)
@@ -393,6 +393,9 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,
 
        desc = &priv->hw[priv->head];
 
+       /* Initialize the DMA buffer */
+       memset(priv->dma_buffer, 0, sizeof(priv->dma_buffer));
+
        /* Initialize the descriptor */
        memset(desc, 0, sizeof(struct ismt_desc));
        desc->tgtaddr_rw = ISMT_DESC_ADDR_RW(addr, read_write);
index 7f3a4744349476f941a4500c9edc6a65a6da0fe2..d3e9cc3153a973dc62f99d7d607e003179f41f9d 100644 (file)
@@ -234,9 +234,9 @@ static int mv64xxx_i2c_offload_msg(struct mv64xxx_i2c_data *drv_data)
                ctrl_reg |= MV64XXX_I2C_BRIDGE_CONTROL_WR |
                    (msg->len - 1) << MV64XXX_I2C_BRIDGE_CONTROL_TX_SIZE_SHIFT;
 
-               writel_relaxed(data_reg_lo,
+               writel(data_reg_lo,
                        drv_data->reg_base + MV64XXX_I2C_REG_TX_DATA_LO);
-               writel_relaxed(data_reg_hi,
+               writel(data_reg_hi,
                        drv_data->reg_base + MV64XXX_I2C_REG_TX_DATA_HI);
 
        } else {
@@ -697,6 +697,7 @@ static const struct of_device_id mv64xxx_i2c_of_match_table[] = {
 MODULE_DEVICE_TABLE(of, mv64xxx_i2c_of_match_table);
 
 #ifdef CONFIG_OF
+#ifdef CONFIG_HAVE_CLK
 static int
 mv64xxx_calc_freq(const int tclk, const int n, const int m)
 {
@@ -726,16 +727,12 @@ mv64xxx_find_baud_factors(const u32 req_freq, const u32 tclk, u32 *best_n,
                return false;
        return true;
 }
+#endif /* CONFIG_HAVE_CLK */
 
 static int
 mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
                  struct device *dev)
 {
-       const struct of_device_id *device;
-       struct device_node *np = dev->of_node;
-       u32 bus_freq, tclk;
-       int rc = 0;
-
        /* CLK is mandatory when using DT to describe the i2c bus. We
         * need to know tclk in order to calculate bus clock
         * factors.
@@ -744,6 +741,11 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
        /* Have OF but no CLK */
        return -ENODEV;
 #else
+       const struct of_device_id *device;
+       struct device_node *np = dev->of_node;
+       u32 bus_freq, tclk;
+       int rc = 0;
+
        if (IS_ERR(drv_data->clk)) {
                rc = -ENODEV;
                goto out;
index 3535f3c0f7b43233b09123c89b4727fc302cba3b..3747b9bf67d6440f684bcff945a2a85b93f6fe6c 100644 (file)
@@ -1178,8 +1178,6 @@ static int s3c24xx_i2c_remove(struct platform_device *pdev)
 
        i2c_del_adapter(&i2c->adap);
 
-       clk_disable_unprepare(i2c->clk);
-
        if (pdev->dev.of_node && IS_ERR(i2c->pctrl))
                s3c24xx_i2c_dt_gpio_free(i2c);
 
index 12e32e6b41037c334c1eb8b3cef0e70145f8d228..81e3dc260993124981d45cacb9e472e9221d7b8a 100644 (file)
@@ -620,7 +620,7 @@ static int bma180_remove(struct i2c_client *client)
 #ifdef CONFIG_PM_SLEEP
 static int bma180_suspend(struct device *dev)
 {
-       struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+       struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
        struct bma180_data *data = iio_priv(indio_dev);
        int ret;
 
@@ -633,7 +633,7 @@ static int bma180_suspend(struct device *dev)
 
 static int bma180_resume(struct device *dev)
 {
-       struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+       struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
        struct bma180_data *data = iio_priv(indio_dev);
        int ret;
 
index 84be63bdf0382b6b44dfa14ff818e72377baf779..0f16b553e063f602e4e83104382827bb55cc72b2 100644 (file)
@@ -556,7 +556,7 @@ static const struct iio_info at91_adc_info = {
 
 static int at91_adc_probe(struct platform_device *pdev)
 {
-       unsigned int prsc, mstrclk, ticks, adc_clk, shtim;
+       unsigned int prsc, mstrclk, ticks, adc_clk, adc_clk_khz, shtim;
        int ret;
        struct iio_dev *idev;
        struct at91_adc_state *st;
@@ -649,6 +649,7 @@ static int at91_adc_probe(struct platform_device *pdev)
         */
        mstrclk = clk_get_rate(st->clk);
        adc_clk = clk_get_rate(st->adc_clk);
+       adc_clk_khz = adc_clk / 1000;
        prsc = (mstrclk / (2 * adc_clk)) - 1;
 
        if (!st->startup_time) {
@@ -662,15 +663,15 @@ static int at91_adc_probe(struct platform_device *pdev)
         * defined in the electrical characteristics of the board, divided by 8.
         * The formula thus is: Startup Time = (ticks + 1) * 8 / ADC Clock
         */
-       ticks = round_up((st->startup_time * adc_clk /
-                         1000000) - 1, 8) / 8;
+       ticks = round_up((st->startup_time * adc_clk_khz /
+                         1000) - 1, 8) / 8;
        /*
         * A minimal Sample and Hold Time is necessary for the ADC to guarantee
         * the best converted final value between two channel selections.
         * The formula thus is: Sample and Hold Time = (shtim + 1) / ADC Clock
         */
-       shtim = round_up((st->sample_hold_time * adc_clk /
-                         1000000) - 1, 1);
+       shtim = round_up((st->sample_hold_time * adc_clk_khz /
+                         1000) - 1, 1);
 
        reg = AT91_ADC_PRESCAL_(prsc) & st->registers->mr_prescal_mask;
        reg |= AT91_ADC_STARTUP_(ticks) & st->registers->mr_startup_mask;
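
Using adc_clk_khz instead of the raw Hz rate keeps the intermediate product startup_time * clock within 32 bits while producing the same tick count. As a worked example, assuming st->startup_time is in microseconds: with a 1 MHz ADC clock (adc_clk_khz = 1000) and a 40 µs startup time, ticks = round_up(40 * 1000 / 1000 - 1, 8) / 8 = round_up(39, 8) / 8 = 5, and the achieved startup time (ticks + 1) * 8 / ADC Clock = 48 µs covers the requested 40 µs. A small sketch of the same arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    /* Round v up to the next multiple of 'to' (behaves like the kernel's round_up
     * for the power-of-two step used here). */
    static uint32_t round_up_to(uint32_t v, uint32_t to)
    {
            return (v + to - 1) / to * to;
    }

    int main(void)
    {
            uint32_t startup_us  = 40;      /* assumed unit: microseconds */
            uint32_t adc_clk_khz = 1000;    /* 1 MHz ADC clock */
            uint32_t ticks = round_up_to(startup_us * adc_clk_khz / 1000 - 1, 8) / 8;

            printf("ticks=%u actual startup=%u us\n",
                   ticks, (ticks + 1) * 8 * 1000 / adc_clk_khz);   /* ticks=5, 48 us */
            return 0;
    }
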
index 9d19ba74f22bd92125501e2827ef51bbae59da13..415f3c6efd7293087cc1d5fa9313e9de308e6245 100644 (file)
@@ -41,6 +41,8 @@ struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev,
                goto error_ret;
        }
 
+       iio_buffer_init(&cb_buff->buffer);
+
        cb_buff->private = private;
        cb_buff->cb = cb;
        cb_buff->buffer.access = &iio_cb_access;
index 1f4a48e6a82c33f29b8985ad556698f0630e21af..1397b6e0e414c25835aebd829dc2bede15dedee3 100644 (file)
@@ -37,21 +37,21 @@ struct mcp4725_data {
 
 static int mcp4725_suspend(struct device *dev)
 {
-       struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-       struct mcp4725_data *data = iio_priv(indio_dev);
+       struct mcp4725_data *data = iio_priv(i2c_get_clientdata(
+               to_i2c_client(dev)));
        u8 outbuf[2];
 
        outbuf[0] = (data->powerdown_mode + 1) << 4;
        outbuf[1] = 0;
        data->powerdown = true;
 
-       return i2c_master_send(to_i2c_client(dev), outbuf, 2);
+       return i2c_master_send(data->client, outbuf, 2);
 }
 
 static int mcp4725_resume(struct device *dev)
 {
-       struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-       struct mcp4725_data *data = iio_priv(indio_dev);
+       struct mcp4725_data *data = iio_priv(i2c_get_clientdata(
+               to_i2c_client(dev)));
        u8 outbuf[2];
 
        /* restore previous DAC value */
@@ -59,7 +59,7 @@ static int mcp4725_resume(struct device *dev)
        outbuf[1] = data->dac_value & 0xff;
        data->powerdown = false;
 
-       return i2c_master_send(to_i2c_client(dev), outbuf, 2);
+       return i2c_master_send(data->client, outbuf, 2);
 }
 
 #ifdef CONFIG_PM_SLEEP
index 05c1b74502a37265c86a23edaf97d4e602ead575..9b32253b824be454bf2f6c46bf02acb022b3be46 100644 (file)
@@ -49,11 +49,15 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
 #define iio_buffer_poll_addr (&iio_buffer_poll)
 #define iio_buffer_read_first_n_outer_addr (&iio_buffer_read_first_n_outer)
 
+void iio_disable_all_buffers(struct iio_dev *indio_dev);
+
 #else
 
 #define iio_buffer_poll_addr NULL
 #define iio_buffer_read_first_n_outer_addr NULL
 
+static inline void iio_disable_all_buffers(struct iio_dev *indio_dev) {}
+
 #endif
 
 int iio_device_register_eventset(struct iio_dev *indio_dev);
index e73033f3839a5e1eceba486cadf2825b30fb7027..2710f7245c3b5472b0e924bd22a94f6bc1212b6f 100644 (file)
@@ -460,6 +460,25 @@ static int iio_compute_scan_bytes(struct iio_dev *indio_dev, const long *mask,
        return bytes;
 }
 
+void iio_disable_all_buffers(struct iio_dev *indio_dev)
+{
+       struct iio_buffer *buffer, *_buffer;
+
+       if (list_empty(&indio_dev->buffer_list))
+               return;
+
+       if (indio_dev->setup_ops->predisable)
+               indio_dev->setup_ops->predisable(indio_dev);
+
+       list_for_each_entry_safe(buffer, _buffer,
+                       &indio_dev->buffer_list, buffer_list)
+               list_del_init(&buffer->buffer_list);
+
+       indio_dev->currentmode = INDIO_DIRECT_MODE;
+       if (indio_dev->setup_ops->postdisable)
+               indio_dev->setup_ops->postdisable(indio_dev);
+}
+
 int iio_update_buffers(struct iio_dev *indio_dev,
                       struct iio_buffer *insert_buffer,
                       struct iio_buffer *remove_buffer)
@@ -528,8 +547,15 @@ int iio_update_buffers(struct iio_dev *indio_dev,
                         * Note can only occur when adding a buffer.
                         */
                        list_del(&insert_buffer->buffer_list);
-                       indio_dev->active_scan_mask = old_mask;
-                       success = -EINVAL;
+                       if (old_mask) {
+                               indio_dev->active_scan_mask = old_mask;
+                               success = -EINVAL;
+                       } else {
+                               kfree(compound_mask);
+                               ret = -EINVAL;
+                               goto error_ret;
+                       }
                }
        } else {
                indio_dev->active_scan_mask = compound_mask;
index 97f0297b120f41e7f73d9b5a5cf27887da146a92..8e84cd522e4970af8603e10a157657b9ab14102e 100644 (file)
@@ -848,8 +848,6 @@ static void iio_device_unregister_sysfs(struct iio_dev *indio_dev)
 static void iio_dev_release(struct device *device)
 {
        struct iio_dev *indio_dev = dev_to_iio_dev(device);
-       if (indio_dev->chrdev.dev)
-               cdev_del(&indio_dev->chrdev);
        if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
                iio_device_unregister_trigger_consumer(indio_dev);
        iio_device_unregister_eventset(indio_dev);
@@ -970,6 +968,8 @@ static int iio_chrdev_open(struct inode *inode, struct file *filp)
        if (test_and_set_bit(IIO_BUSY_BIT_POS, &indio_dev->flags))
                return -EBUSY;
 
+       iio_device_get(indio_dev);
+
        filp->private_data = indio_dev;
 
        return 0;
@@ -983,6 +983,8 @@ static int iio_chrdev_release(struct inode *inode, struct file *filp)
        struct iio_dev *indio_dev = container_of(inode->i_cdev,
                                                struct iio_dev, chrdev);
        clear_bit(IIO_BUSY_BIT_POS, &indio_dev->flags);
+       iio_device_put(indio_dev);
+
        return 0;
 }
 
@@ -1052,18 +1054,20 @@ int iio_device_register(struct iio_dev *indio_dev)
                indio_dev->setup_ops == NULL)
                indio_dev->setup_ops = &noop_ring_setup_ops;
 
-       ret = device_add(&indio_dev->dev);
-       if (ret < 0)
-               goto error_unreg_eventset;
        cdev_init(&indio_dev->chrdev, &iio_buffer_fileops);
        indio_dev->chrdev.owner = indio_dev->info->driver_module;
+       indio_dev->chrdev.kobj.parent = &indio_dev->dev.kobj;
        ret = cdev_add(&indio_dev->chrdev, indio_dev->dev.devt, 1);
        if (ret < 0)
-               goto error_del_device;
-       return 0;
+               goto error_unreg_eventset;
 
-error_del_device:
-       device_del(&indio_dev->dev);
+       ret = device_add(&indio_dev->dev);
+       if (ret < 0)
+               goto error_cdev_del;
+
+       return 0;
+error_cdev_del:
+       cdev_del(&indio_dev->chrdev);
 error_unreg_eventset:
        iio_device_unregister_eventset(indio_dev);
 error_free_sysfs:
@@ -1078,9 +1082,16 @@ EXPORT_SYMBOL(iio_device_register);
 void iio_device_unregister(struct iio_dev *indio_dev)
 {
        mutex_lock(&indio_dev->info_exist_lock);
+
+       device_del(&indio_dev->dev);
+
+       if (indio_dev->chrdev.dev)
+               cdev_del(&indio_dev->chrdev);
+
+       iio_disable_all_buffers(indio_dev);
+
        indio_dev->info = NULL;
        mutex_unlock(&indio_dev->info_exist_lock);
-       device_del(&indio_dev->dev);
 }
 EXPORT_SYMBOL(iio_device_unregister);
 subsys_initcall(iio_init);
index 10aa9ef86cece1b80fa09c57d2a52e344e4dfc12..6be65ef5faa9b6e46716857af7ed67d8eb7ea004 100644 (file)
@@ -72,7 +72,8 @@ EXPORT_SYMBOL(iio_push_event);
 static unsigned int iio_event_poll(struct file *filep,
                             struct poll_table_struct *wait)
 {
-       struct iio_event_interface *ev_int = filep->private_data;
+       struct iio_dev *indio_dev = filep->private_data;
+       struct iio_event_interface *ev_int = indio_dev->event_interface;
        unsigned int events = 0;
 
        poll_wait(filep, &ev_int->wait, wait);
@@ -90,7 +91,8 @@ static ssize_t iio_event_chrdev_read(struct file *filep,
                                     size_t count,
                                     loff_t *f_ps)
 {
-       struct iio_event_interface *ev_int = filep->private_data;
+       struct iio_dev *indio_dev = filep->private_data;
+       struct iio_event_interface *ev_int = indio_dev->event_interface;
        unsigned int copied;
        int ret;
 
@@ -121,7 +123,8 @@ error_unlock:
 
 static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
 {
-       struct iio_event_interface *ev_int = filep->private_data;
+       struct iio_dev *indio_dev = filep->private_data;
+       struct iio_event_interface *ev_int = indio_dev->event_interface;
 
        spin_lock_irq(&ev_int->wait.lock);
        __clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
@@ -133,6 +136,8 @@ static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
        kfifo_reset_out(&ev_int->det_events);
        spin_unlock_irq(&ev_int->wait.lock);
 
+       iio_device_put(indio_dev);
+
        return 0;
 }
 
@@ -158,12 +163,15 @@ int iio_event_getfd(struct iio_dev *indio_dev)
                return -EBUSY;
        }
        spin_unlock_irq(&ev_int->wait.lock);
-       fd = anon_inode_getfd("iio:event",
-                               &iio_event_chrdev_fileops, ev_int, O_RDONLY);
+       iio_device_get(indio_dev);
+
+       fd = anon_inode_getfd("iio:event", &iio_event_chrdev_fileops,
+                               indio_dev, O_RDONLY);
        if (fd < 0) {
                spin_lock_irq(&ev_int->wait.lock);
                __clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
                spin_unlock_irq(&ev_int->wait.lock);
+               iio_device_put(indio_dev);
        }
        return fd;
 }
@@ -276,7 +284,7 @@ static int iio_device_add_event_sysfs(struct iio_dev *indio_dev,
                        goto error_ret;
                }
                if (chan->modified)
-                       mask = IIO_MOD_EVENT_CODE(chan->type, 0, chan->channel,
+                       mask = IIO_MOD_EVENT_CODE(chan->type, 0, chan->channel2,
                                                  i/IIO_EV_DIR_MAX,
                                                  i%IIO_EV_DIR_MAX);
                else if (chan->differential)
index 64ccde3f1f7a60ae6693d1e91f117e370bd0141b..6d63883da1ab0e56be798538e1d965b1032eacb7 100644 (file)
@@ -255,12 +255,14 @@ static int tmp006_remove(struct i2c_client *client)
 #ifdef CONFIG_PM_SLEEP
 static int tmp006_suspend(struct device *dev)
 {
-       return tmp006_powerdown(iio_priv(dev_to_iio_dev(dev)));
+       struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+       return tmp006_powerdown(iio_priv(indio_dev));
 }
 
 static int tmp006_resume(struct device *dev)
 {
-       struct tmp006_data *data = iio_priv(dev_to_iio_dev(dev));
+       struct tmp006_data *data = iio_priv(i2c_get_clientdata(
+               to_i2c_client(dev)));
        return i2c_smbus_write_word_swapped(data->client, TMP006_CONFIG,
                data->config | TMP006_CONFIG_MOD_MASK);
 }
index dab4b41f1715846bd2a26c03fd90f0175766980e..a082fd9e7ebe009465f961eab3ad8b840839f7f9 100644 (file)
@@ -2294,7 +2294,7 @@ static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)
        int low, high, remaining;
        unsigned int rover;
 
-       inet_get_local_port_range(&low, &high);
+       inet_get_local_port_range(&init_net, &low, &high);
        remaining = (high - low) + 1;
        rover = net_random() % remaining + low;
 retry:
index 7cab5c3276c2e16f9e91fb97a635e7ccdf6640b5..e1519718ce67e3ed4363078eb3e8207b6b9f1db6 100644 (file)
@@ -288,9 +288,9 @@ int divas_um_idi_delete_entity(int adapter_nr, void *entity)
        cleanup_entity(e);
        diva_os_free(0, e->os_context);
        memset(e, 0x00, sizeof(*e));
-       diva_os_free(0, e);
 
        DBG_LOG(("A(%d) remove E:%08x", adapter_nr, e));
+       diva_os_free(0, e);
 
        return (0);
 }
index b39f6f0b45f27b89f29e580844e41d8d315d9c18..0f12382aa35d6c939b53967b205639134e181eae 100644 (file)
@@ -498,7 +498,7 @@ struct cached_dev {
         */
        atomic_t                has_dirty;
 
-       struct ratelimit        writeback_rate;
+       struct bch_ratelimit    writeback_rate;
        struct delayed_work     writeback_rate_update;
 
        /*
@@ -507,10 +507,9 @@ struct cached_dev {
         */
        sector_t                last_read;
 
-       /* Number of writeback bios in flight */
-       atomic_t                in_flight;
+       /* Limit number of writeback bios in flight */
+       struct semaphore        in_flight;
        struct closure_with_timer writeback;
-       struct closure_waitlist writeback_wait;
 
        struct keybuf           writeback_keys;
 
index 8010eed06a51c8320786a1c49463c98c8aa70555..22d1ae72c2826a53b67656a59ea717a62a3b95a4 100644 (file)
@@ -926,28 +926,45 @@ struct bkey *bch_next_recurse_key(struct btree *b, struct bkey *search)
 
 /* Mergesort */
 
+static void sort_key_next(struct btree_iter *iter,
+                         struct btree_iter_set *i)
+{
+       i->k = bkey_next(i->k);
+
+       if (i->k == i->end)
+               *i = iter->data[--iter->used];
+}
+
 static void btree_sort_fixup(struct btree_iter *iter)
 {
        while (iter->used > 1) {
                struct btree_iter_set *top = iter->data, *i = top + 1;
-               struct bkey *k;
 
                if (iter->used > 2 &&
                    btree_iter_cmp(i[0], i[1]))
                        i++;
 
-               for (k = i->k;
-                    k != i->end && bkey_cmp(top->k, &START_KEY(k)) > 0;
-                    k = bkey_next(k))
-                       if (top->k > i->k)
-                               __bch_cut_front(top->k, k);
-                       else if (KEY_SIZE(k))
-                               bch_cut_back(&START_KEY(k), top->k);
-
-               if (top->k < i->k || k == i->k)
+               if (bkey_cmp(top->k, &START_KEY(i->k)) <= 0)
                        break;
 
-               heap_sift(iter, i - top, btree_iter_cmp);
+               if (!KEY_SIZE(i->k)) {
+                       sort_key_next(iter, i);
+                       heap_sift(iter, i - top, btree_iter_cmp);
+                       continue;
+               }
+
+               if (top->k > i->k) {
+                       if (bkey_cmp(top->k, i->k) >= 0)
+                               sort_key_next(iter, i);
+                       else
+                               bch_cut_front(top->k, i->k);
+
+                       heap_sift(iter, i - top, btree_iter_cmp);
+               } else {
+                       /* can't happen because of comparison func */
+                       BUG_ON(!bkey_cmp(&START_KEY(top->k), &START_KEY(i->k)));
+                       bch_cut_back(&START_KEY(i->k), top->k);
+               }
        }
 }
 
index f9764e61978b5749487862b5ab88eb73b13f18ba..f42fc7ed9cd63b14fd4cf54a879dfd9d046584d1 100644 (file)
@@ -255,7 +255,7 @@ void bch_btree_node_read(struct btree *b)
 
        return;
 err:
-       bch_cache_set_error(b->c, "io error reading bucket %lu",
+       bch_cache_set_error(b->c, "io error reading bucket %zu",
                            PTR_BUCKET_NR(b->c, &b->key, 0));
 }
 
@@ -612,7 +612,7 @@ static unsigned long bch_mca_scan(struct shrinker *shrink,
                return SHRINK_STOP;
 
        /* Return -1 if we can't do anything right now */
-       if (sc->gfp_mask & __GFP_WAIT)
+       if (sc->gfp_mask & __GFP_IO)
                mutex_lock(&c->bucket_lock);
        else if (!mutex_trylock(&c->bucket_lock))
                return -1;
index ba95ab84b2be5a5a32292625d229bb29e051a53d..8435f81e5d858012e8aca6be8204e923a34b1d01 100644 (file)
@@ -153,7 +153,8 @@ int bch_journal_read(struct cache_set *c, struct list_head *list,
                bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
                pr_debug("%u journal buckets", ca->sb.njournal_buckets);
 
-               /* Read journal buckets ordered by golden ratio hash to quickly
+               /*
+                * Read journal buckets ordered by golden ratio hash to quickly
                 * find a sequence of buckets with valid journal entries
                 */
                for (i = 0; i < ca->sb.njournal_buckets; i++) {
@@ -166,18 +167,20 @@ int bch_journal_read(struct cache_set *c, struct list_head *list,
                                goto bsearch;
                }
 
-               /* If that fails, check all the buckets we haven't checked
+               /*
+                * If that fails, check all the buckets we haven't checked
                 * already
                 */
                pr_debug("falling back to linear search");
 
-               for (l = 0; l < ca->sb.njournal_buckets; l++) {
-                       if (test_bit(l, bitmap))
-                               continue;
-
+               for (l = find_first_zero_bit(bitmap, ca->sb.njournal_buckets);
+                    l < ca->sb.njournal_buckets;
+                    l = find_next_zero_bit(bitmap, ca->sb.njournal_buckets, l + 1))
                        if (read_bucket(l))
                                goto bsearch;
-               }
+
+               if (list_empty(list))
+                       continue;
 bsearch:
                /* Binary search */
                m = r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
@@ -197,10 +200,12 @@ bsearch:
                                r = m;
                }
 
-               /* Read buckets in reverse order until we stop finding more
+               /*
+                * Read buckets in reverse order until we stop finding more
                 * journal entries
                 */
-               pr_debug("finishing up");
+               pr_debug("finishing up: m %u njournal_buckets %u",
+                        m, ca->sb.njournal_buckets);
                l = m;
 
                while (1) {
@@ -228,9 +233,10 @@ bsearch:
                        }
        }
 
-       c->journal.seq = list_entry(list->prev,
-                                   struct journal_replay,
-                                   list)->j.seq;
+       if (!list_empty(list))
+               c->journal.seq = list_entry(list->prev,
+                                           struct journal_replay,
+                                           list)->j.seq;
 
        return 0;
 #undef read_bucket
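
The linear-search fallback now visits only the buckets that were not already read by jumping between the clear bits of the bitmap with find_first_zero_bit()/find_next_zero_bit(), and the added list_empty() check avoids the binary-search path entirely when no bucket produced a journal entry. Outside the kernel the same clear-bit iteration idiom looks roughly like this (a naive stand-in for the kernel helpers, shown only to illustrate the loop shape):

    #include <stdint.h>
    #include <stdio.h>

    #define BITS_PER_WORD 64

    /* Index of the first clear bit at or after 'start', or 'nbits' if none. */
    static unsigned next_zero_bit(const uint64_t *map, unsigned nbits, unsigned start)
    {
            unsigned i;

            for (i = start; i < nbits; i++)
                    if (!(map[i / BITS_PER_WORD] & (1ULL << (i % BITS_PER_WORD))))
                            return i;
            return nbits;
    }

    int main(void)
    {
            uint64_t map[1] = { 0x0b };     /* bits 0, 1, 3 already checked */
            unsigned nbits = 8, l;

            for (l = next_zero_bit(map, nbits, 0);
                 l < nbits;
                 l = next_zero_bit(map, nbits, l + 1))
                    printf("check bucket %u\n", l);  /* 2, 4, 5, 6, 7 */
            return 0;
    }
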
@@ -428,7 +434,7 @@ static void do_journal_discard(struct cache *ca)
                return;
        }
 
-       switch (atomic_read(&ja->discard_in_flight) == DISCARD_IN_FLIGHT) {
+       switch (atomic_read(&ja->discard_in_flight)) {
        case DISCARD_IN_FLIGHT:
                return;
 
@@ -689,6 +695,7 @@ void bch_journal_meta(struct cache_set *c, struct closure *cl)
                if (cl)
                        BUG_ON(!closure_wait(&w->wait, cl));
 
+               closure_flush(&c->journal.io);
                __journal_try_write(c, true);
        }
 }
index 786a1a4f74d853fafe3ab2ae263d2ecf780134aa..71eb233b9ace7a23f143170e8f4cc924a2a1de9a 100644 (file)
@@ -997,14 +997,17 @@ static void request_write(struct cached_dev *dc, struct search *s)
        } else {
                bch_writeback_add(dc);
 
-               if (s->op.flush_journal) {
+               if (bio->bi_rw & REQ_FLUSH) {
                        /* Also need to send a flush to the backing device */
-                       s->op.cache_bio = bio_clone_bioset(bio, GFP_NOIO,
-                                                          dc->disk.bio_split);
+                       struct bio *flush = bio_alloc_bioset(0, GFP_NOIO,
+                                                            dc->disk.bio_split);
 
-                       bio->bi_size = 0;
-                       bio->bi_vcnt = 0;
-                       closure_bio_submit(bio, cl, s->d);
+                       flush->bi_rw    = WRITE_FLUSH;
+                       flush->bi_bdev  = bio->bi_bdev;
+                       flush->bi_end_io = request_endio;
+                       flush->bi_private = cl;
+
+                       closure_bio_submit(flush, cl, s->d);
                } else {
                        s->op.cache_bio = bio;
                }
index 4fe6ab2fbe2ede59644441521aa4cc2f27f5d312..924dcfdae11102256e1ce193eefc01d82cc173cc 100644 (file)
@@ -223,8 +223,13 @@ STORE(__cached_dev)
        }
 
        if (attr == &sysfs_label) {
-               /* note: endlines are preserved */
-               memcpy(dc->sb.label, buf, SB_LABEL_SIZE);
+               if (size > SB_LABEL_SIZE)
+                       return -EINVAL;
+               memcpy(dc->sb.label, buf, size);
+               if (size < SB_LABEL_SIZE)
+                       dc->sb.label[size] = '\0';
+               if (size && dc->sb.label[size - 1] == '\n')
+                       dc->sb.label[size - 1] = '\0';
                bch_write_bdev_super(dc, NULL);
                if (dc->disk.c) {
                        memcpy(dc->disk.c->uuids[dc->disk.id].label,
index 98eb81159a22ba9f9e88fec53127e2e338ae3d4d..420dad545c7d8a01e8b5c18d26db4b25677fd334 100644 (file)
@@ -190,7 +190,16 @@ void bch_time_stats_update(struct time_stats *stats, uint64_t start_time)
        stats->last = now ?: 1;
 }
 
-unsigned bch_next_delay(struct ratelimit *d, uint64_t done)
+/**
+ * bch_next_delay() - increment @d by the amount of work done, and return how
+ * long to delay until the next time to do some work.
+ *
+ * @d: the struct bch_ratelimit to update
+ * @done: the amount of work done, in arbitrary units
+ *
+ * Returns the amount of time to delay by, in jiffies
+ */
+uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done)
 {
        uint64_t now = local_clock();
 
index 1ae2a73ad85f5628b5292b769d79b0f28252f4ca..ea345c6896f47777942b64f88810c99d4cbc278e 100644 (file)
@@ -450,17 +450,23 @@ read_attribute(name ## _last_ ## frequency_units)
        (ewma) >> factor;                                               \
 })
 
-struct ratelimit {
+struct bch_ratelimit {
+       /* Next time we want to do some work, in nanoseconds */
        uint64_t                next;
+
+       /*
+        * Rate at which we want to do work, in units per nanosecond
+        * The units here correspond to the units passed to bch_next_delay()
+        */
        unsigned                rate;
 };
 
-static inline void ratelimit_reset(struct ratelimit *d)
+static inline void bch_ratelimit_reset(struct bch_ratelimit *d)
 {
        d->next = local_clock();
 }
 
-unsigned bch_next_delay(struct ratelimit *d, uint64_t done);
+uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done);
 
 #define __DIV_SAFE(n, d, zero)                                         \
 ({                                                                     \
index 22cbff551628f3c9cff87ccc35563c09974f03b3..ba3ee48320f2a38509adb2603f766c55e67f1da1 100644 (file)
@@ -94,11 +94,15 @@ static void update_writeback_rate(struct work_struct *work)
 
 static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
 {
+       uint64_t ret;
+
        if (atomic_read(&dc->disk.detaching) ||
            !dc->writeback_percent)
                return 0;
 
-       return bch_next_delay(&dc->writeback_rate, sectors * 10000000ULL);
+       ret = bch_next_delay(&dc->writeback_rate, sectors * 10000000ULL);
+
+       return min_t(uint64_t, ret, HZ);
 }
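
writeback_delay() now also clamps the value coming back from bch_next_delay() to HZ jiffies, so a badly tuned rate can delay the writeback thread by at most one second per iteration. The pacing idea behind such a helper, in a rough userspace sketch (an illustration of the concept only, not the bcache implementation; the units and fields are assumptions):

    #include <stdint.h>

    struct pacer {
            uint64_t next_ns;   /* next time we want to do work, in ns */
            uint64_t rate;      /* desired throughput, in work units per second */
    };

    /* Credit 'done' units of finished work and return how long to sleep, in ns
     * (0 if we are already behind schedule). */
    static uint64_t pacer_next_delay(struct pacer *p, uint64_t done, uint64_t now_ns)
    {
            p->next_ns += done * 1000000000ULL / p->rate;

            return (now_ns >= p->next_ns) ? 0 : p->next_ns - now_ns;
    }
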
 
 /* Background writeback */
@@ -208,7 +212,7 @@ normal_refill:
 
        up_write(&dc->writeback_lock);
 
-       ratelimit_reset(&dc->writeback_rate);
+       bch_ratelimit_reset(&dc->writeback_rate);
 
        /* Punt to workqueue only so we don't recurse and blow the stack */
        continue_at(cl, read_dirty, dirty_wq);
@@ -318,9 +322,7 @@ static void write_dirty_finish(struct closure *cl)
        }
 
        bch_keybuf_del(&dc->writeback_keys, w);
-       atomic_dec_bug(&dc->in_flight);
-
-       closure_wake_up(&dc->writeback_wait);
+       up(&dc->in_flight);
 
        closure_return_with_destructor(cl, dirty_io_destructor);
 }
@@ -349,7 +351,7 @@ static void write_dirty(struct closure *cl)
 
        closure_bio_submit(&io->bio, cl, &io->dc->disk);
 
-       continue_at(cl, write_dirty_finish, dirty_wq);
+       continue_at(cl, write_dirty_finish, system_wq);
 }
 
 static void read_dirty_endio(struct bio *bio, int error)
@@ -369,7 +371,7 @@ static void read_dirty_submit(struct closure *cl)
 
        closure_bio_submit(&io->bio, cl, &io->dc->disk);
 
-       continue_at(cl, write_dirty, dirty_wq);
+       continue_at(cl, write_dirty, system_wq);
 }
 
 static void read_dirty(struct closure *cl)
@@ -394,12 +396,8 @@ static void read_dirty(struct closure *cl)
 
                if (delay > 0 &&
                    (KEY_START(&w->key) != dc->last_read ||
-                    jiffies_to_msecs(delay) > 50)) {
-                       w->private = NULL;
-
-                       closure_delay(&dc->writeback, delay);
-                       continue_at(cl, read_dirty, dirty_wq);
-               }
+                    jiffies_to_msecs(delay) > 50))
+                       delay = schedule_timeout_uninterruptible(delay);
 
                dc->last_read   = KEY_OFFSET(&w->key);
 
@@ -424,15 +422,10 @@ static void read_dirty(struct closure *cl)
 
                trace_bcache_writeback(&w->key);
 
-               closure_call(&io->cl, read_dirty_submit, NULL, &dc->disk.cl);
+               down(&dc->in_flight);
+               closure_call(&io->cl, read_dirty_submit, NULL, cl);
 
                delay = writeback_delay(dc, KEY_SIZE(&w->key));
-
-               atomic_inc(&dc->in_flight);
-
-               if (!closure_wait_event(&dc->writeback_wait, cl,
-                                       atomic_read(&dc->in_flight) < 64))
-                       continue_at(cl, read_dirty, dirty_wq);
        }
 
        if (0) {
@@ -442,7 +435,11 @@ err:
                bch_keybuf_del(&dc->writeback_keys, w);
        }
 
-       refill_dirty(cl);
+       /*
+        * Wait for outstanding writeback IOs to finish (and keybuf slots to be
+        * freed) before refilling again
+        */
+       continue_at(cl, refill_dirty, dirty_wq);
 }
 
 /* Init */
@@ -484,6 +481,7 @@ void bch_sectors_dirty_init(struct cached_dev *dc)
 
 void bch_cached_dev_writeback_init(struct cached_dev *dc)
 {
+       sema_init(&dc->in_flight, 64);
        closure_init_unlocked(&dc->writeback);
        init_rwsem(&dc->writeback_lock);
 
@@ -513,7 +511,7 @@ void bch_writeback_exit(void)
 
 int __init bch_writeback_init(void)
 {
-       dirty_wq = create_singlethread_workqueue("bcache_writeback");
+       dirty_wq = create_workqueue("bcache_writeback");
        if (!dirty_wq)
                return -ENOMEM;
 
index ea49834377c8e17b6e8b221e4bd58389a8d7c32e..2a20986a2fec9701cd25e443c990f2b7a8479f9f 100644 (file)
@@ -19,8 +19,6 @@
 #define DM_MSG_PREFIX "io"
 
 #define DM_IO_MAX_REGIONS      BITS_PER_LONG
-#define MIN_IOS                16
-#define MIN_BIOS       16
 
 struct dm_io_client {
        mempool_t *pool;
@@ -50,16 +48,17 @@ static struct kmem_cache *_dm_io_cache;
 struct dm_io_client *dm_io_client_create(void)
 {
        struct dm_io_client *client;
+       unsigned min_ios = dm_get_reserved_bio_based_ios();
 
        client = kmalloc(sizeof(*client), GFP_KERNEL);
        if (!client)
                return ERR_PTR(-ENOMEM);
 
-       client->pool = mempool_create_slab_pool(MIN_IOS, _dm_io_cache);
+       client->pool = mempool_create_slab_pool(min_ios, _dm_io_cache);
        if (!client->pool)
                goto bad;
 
-       client->bios = bioset_create(MIN_BIOS, 0);
+       client->bios = bioset_create(min_ios, 0);
        if (!client->bios)
                goto bad;
 
index b759a127f9c3718bbfffe2d16ca258fe4afe15e2..de570a55876451a0326d071da4cf68fd772eec2e 100644 (file)
@@ -7,6 +7,7 @@
 
 #include <linux/device-mapper.h>
 
+#include "dm.h"
 #include "dm-path-selector.h"
 #include "dm-uevent.h"
 
@@ -116,8 +117,6 @@ struct dm_mpath_io {
 
 typedef int (*action_fn) (struct pgpath *pgpath);
 
-#define MIN_IOS 256    /* Mempool size */
-
 static struct kmem_cache *_mpio_cache;
 
 static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
@@ -190,6 +189,7 @@ static void free_priority_group(struct priority_group *pg,
 static struct multipath *alloc_multipath(struct dm_target *ti)
 {
        struct multipath *m;
+       unsigned min_ios = dm_get_reserved_rq_based_ios();
 
        m = kzalloc(sizeof(*m), GFP_KERNEL);
        if (m) {
@@ -202,7 +202,7 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
                INIT_WORK(&m->trigger_event, trigger_event);
                init_waitqueue_head(&m->pg_init_wait);
                mutex_init(&m->work_mutex);
-               m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache);
+               m->mpio_pool = mempool_create_slab_pool(min_ios, _mpio_cache);
                if (!m->mpio_pool) {
                        kfree(m);
                        return NULL;
@@ -1268,6 +1268,7 @@ static int noretry_error(int error)
        case -EREMOTEIO:
        case -EILSEQ:
        case -ENODATA:
+       case -ENOSPC:
                return 1;
        }
 
@@ -1298,8 +1299,17 @@ static int do_end_io(struct multipath *m, struct request *clone,
        if (!error && !clone->errors)
                return 0;       /* I/O complete */
 
-       if (noretry_error(error))
+       if (noretry_error(error)) {
+               if ((clone->cmd_flags & REQ_WRITE_SAME) &&
+                   !clone->q->limits.max_write_same_sectors) {
+                       struct queue_limits *limits;
+
+                       /* device doesn't really support WRITE SAME, disable it */
+                       limits = dm_get_queue_limits(dm_table_get_md(m->ti->table));
+                       limits->max_write_same_sectors = 0;
+               }
                return error;
+       }
 
        if (mpio->pgpath)
                fail_path(mpio->pgpath);
index 3ac415675b6c778b5dd22aaaf4ee6c2dc4ca48eb..4caa8e6d59d7968584e23a9187bc07f8c81df321 100644 (file)
@@ -256,7 +256,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
         */
        INIT_WORK_ONSTACK(&req.work, do_metadata);
        queue_work(ps->metadata_wq, &req.work);
-       flush_work(&req.work);
+       flush_workqueue(ps->metadata_wq);
 
        return req.result;
 }
index c434e5aab2dfc9e6a63ca7700e5ac1c1deacd025..aec57d76db5d616c8e692fa95cee58a8f62a0573 100644 (file)
@@ -725,17 +725,16 @@ static int calc_max_buckets(void)
  */
 static int init_hash_tables(struct dm_snapshot *s)
 {
-       sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets;
+       sector_t hash_size, cow_dev_size, max_buckets;
 
        /*
         * Calculate based on the size of the original volume or
         * the COW volume...
         */
        cow_dev_size = get_dev_size(s->cow->bdev);
-       origin_dev_size = get_dev_size(s->origin->bdev);
        max_buckets = calc_max_buckets();
 
-       hash_size = min(origin_dev_size, cow_dev_size) >> s->store->chunk_shift;
+       hash_size = cow_dev_size >> s->store->chunk_shift;
        hash_size = min(hash_size, max_buckets);
 
        if (hash_size < 64)
index 8ae31e8d3d64964652fd1bd8d31d35d3ed49e879..3d404c1371ed2d7e6f4fa052fd4379fbc89ec388 100644 (file)
@@ -451,19 +451,26 @@ static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
        struct dm_stat_percpu *p;
 
        /*
-        * For strict correctness we should use local_irq_disable/enable
+        * For strict correctness we should use local_irq_save/restore
         * instead of preempt_disable/enable.
         *
-        * This is racy if the driver finishes bios from non-interrupt
-        * context as well as from interrupt context or from more different
-        * interrupts.
+        * preempt_disable/enable is racy if the driver finishes bios
+        * from non-interrupt context as well as from interrupt context
+        * or from multiple different interrupts.
         *
-        * However, the race only results in not counting some events,
-        * so it is acceptable.
+        * On 64-bit architectures the race only results in not counting some
+        * events, so it is acceptable.  On 32-bit architectures the race could
+        * cause the counter to go off by 2^32, so we need to do proper locking
+        * there.
         *
         * part_stat_lock()/part_stat_unlock() have this race too.
         */
+#if BITS_PER_LONG == 32
+       unsigned long flags;
+       local_irq_save(flags);
+#else
        preempt_disable();
+#endif
        p = &s->stat_percpu[smp_processor_id()][entry];
 
        if (!end) {
@@ -478,7 +485,11 @@ static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
                p->ticks[idx] += duration;
        }
 
+#if BITS_PER_LONG == 32
+       local_irq_restore(flags);
+#else
        preempt_enable();
+#endif
 }
 
 static void __dm_stat_bio(struct dm_stat *s, unsigned long bi_rw,
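
The hunks above switch dm_stat_for_entry() from preempt_disable/enable to local_irq_save/restore on 32-bit kernels: a 64-bit per-cpu counter is updated there with two 32-bit stores, and an interrupt landing between them that also touches the counter can leave it off by 2^32. The following is only an illustrative userspace sketch of that guarded-update pattern, not kernel code; stat_update(), guard_begin()/guard_end() and struct stat_percpu are stand-ins for the kernel primitives and structures.

    /* Sketch of the guarded per-CPU counter update pattern; the guard
     * macros are no-ops here, standing in for local_irq_save/restore
     * (32-bit) or preempt_disable/enable (64-bit) in the kernel.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define BITS_PER_LONG (8 * (int)sizeof(long))

    #define guard_begin(flags)  do { (void)(flags); } while (0)
    #define guard_end(flags)    do { (void)(flags); } while (0)

    struct stat_percpu { uint64_t sectors; uint64_t ios; };

    static void stat_update(struct stat_percpu *p, unsigned len)
    {
        unsigned long flags = 0;

        /* On 32-bit, a 64-bit increment takes two stores; an interrupt
         * between them that also updates the counter can corrupt it by
         * 2^32, so interrupts must be excluded.  On 64-bit the increment
         * is a single store and a lost race only drops one event.
         */
        if (BITS_PER_LONG == 32)
            guard_begin(flags);

        p->sectors += len;
        p->ios++;

        if (BITS_PER_LONG == 32)
            guard_end(flags);
    }

    int main(void)
    {
        struct stat_percpu p = { 0, 0 };

        stat_update(&p, 8);
        printf("ios=%llu sectors=%llu\n",
               (unsigned long long)p.ios, (unsigned long long)p.sectors);
        return 0;
    }
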
index ed063427d676f64b53f3d9569449fc0daaf174b0..2c0cf511ec2385fa5a558b5d2e1e1ed0c874c9f6 100644 (file)
@@ -2095,6 +2095,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
         * them down to the data device.  The thin device's discard
         * processing will cause mappings to be removed from the btree.
         */
+       ti->discard_zeroes_data_unsupported = true;
        if (pf.discard_enabled && pf.discard_passdown) {
                ti->num_discard_bios = 1;
 
@@ -2104,7 +2105,6 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
                 * thin devices' discard limits consistent).
                 */
                ti->discards_supported = true;
-               ti->discard_zeroes_data_unsupported = true;
        }
        ti->private = pt;
 
@@ -2689,8 +2689,16 @@ static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
         * They get transferred to the live pool in bind_control_target()
         * called from pool_preresume().
         */
-       if (!pt->adjusted_pf.discard_enabled)
+       if (!pt->adjusted_pf.discard_enabled) {
+               /*
+                * Must explicitly disallow stacking discard limits, otherwise the
+                * block layer will stack them if the pool's data device has support.
+                * QUEUE_FLAG_DISCARD wouldn't be set, but there is no way for the
+                * user to see that, so make sure to set all discard limits to 0.
+                */
+               limits->discard_granularity = 0;
                return;
+       }
 
        disable_passdown_if_not_supported(pt);
 
@@ -2826,10 +2834,10 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
        ti->per_bio_data_size = sizeof(struct dm_thin_endio_hook);
 
        /* In case the pool supports discards, pass them on. */
+       ti->discard_zeroes_data_unsupported = true;
        if (tc->pool->pf.discard_enabled) {
                ti->discards_supported = true;
                ti->num_discard_bios = 1;
-               ti->discard_zeroes_data_unsupported = true;
                /* Discard bios must be split on a block boundary */
                ti->split_discard_bios = true;
        }
index 6a5e9ed2fcc3eb775268e2e5fc401efa55d6da3d..b3e26c7d141771c74d24726f8b5a56ea2134b156 100644 (file)
@@ -211,10 +211,55 @@ struct dm_md_mempools {
        struct bio_set *bs;
 };
 
-#define MIN_IOS 256
+#define RESERVED_BIO_BASED_IOS         16
+#define RESERVED_REQUEST_BASED_IOS     256
+#define RESERVED_MAX_IOS               1024
 static struct kmem_cache *_io_cache;
 static struct kmem_cache *_rq_tio_cache;
 
+/*
+ * Bio-based DM's mempools' reserved IOs set by the user.
+ */
+static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;
+
+/*
+ * Request-based DM's mempools' reserved IOs set by the user.
+ */
+static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;
+
+static unsigned __dm_get_reserved_ios(unsigned *reserved_ios,
+                                     unsigned def, unsigned max)
+{
+       unsigned ios = ACCESS_ONCE(*reserved_ios);
+       unsigned modified_ios = 0;
+
+       if (!ios)
+               modified_ios = def;
+       else if (ios > max)
+               modified_ios = max;
+
+       if (modified_ios) {
+               (void)cmpxchg(reserved_ios, ios, modified_ios);
+               ios = modified_ios;
+       }
+
+       return ios;
+}
+
+unsigned dm_get_reserved_bio_based_ios(void)
+{
+       return __dm_get_reserved_ios(&reserved_bio_based_ios,
+                                    RESERVED_BIO_BASED_IOS, RESERVED_MAX_IOS);
+}
+EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);
+
+unsigned dm_get_reserved_rq_based_ios(void)
+{
+       return __dm_get_reserved_ios(&reserved_rq_based_ios,
+                                    RESERVED_REQUEST_BASED_IOS, RESERVED_MAX_IOS);
+}
+EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);
+
 static int __init local_init(void)
 {
        int r = -ENOMEM;
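
The dm.c hunks above replace the fixed MIN_IOS mempool size with the reserved_bio_based_ios and reserved_rq_based_ios module parameters, and __dm_get_reserved_ios() sanitizes whatever the user wrote: 0 falls back to the built-in default, anything above RESERVED_MAX_IOS is capped, and the corrected value is written back with cmpxchg(). Below is a minimal userspace sketch of that clamping only; get_reserved_ios() is a stand-in name and the ACCESS_ONCE()/cmpxchg() pair is reduced to plain reads and writes, which is valid here solely because the sketch is single-threaded.

    /* Userspace sketch of the clamping done by __dm_get_reserved_ios(). */
    #include <stdio.h>

    #define RESERVED_BIO_BASED_IOS      16
    #define RESERVED_REQUEST_BASED_IOS  256
    #define RESERVED_MAX_IOS            1024

    static unsigned get_reserved_ios(unsigned *reserved_ios,
                                     unsigned def, unsigned max)
    {
        unsigned ios = *reserved_ios;   /* ACCESS_ONCE() in the kernel */

        if (!ios)
            ios = def;                  /* 0 means "use the default" */
        else if (ios > max)
            ios = max;                  /* cap runaway values */

        *reserved_ios = ios;            /* cmpxchg() in the kernel */
        return ios;
    }

    int main(void)
    {
        unsigned vals[] = { 0, 64, 4096 };

        for (int i = 0; i < 3; i++) {
            unsigned v = vals[i];

            printf("user=%u -> pool size=%u\n", vals[i],
                   get_reserved_ios(&v, RESERVED_REQUEST_BASED_IOS,
                                    RESERVED_MAX_IOS));
        }
        return 0;
    }

With the request-based default this prints 0 -> 256, 64 -> 64 and 4096 -> 1024, which is the pool size alloc_multipath() and dm_alloc_md_mempools() end up using in the hunks above.
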
@@ -2277,6 +2322,17 @@ struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
        return md->immutable_target_type;
 }
 
+/*
+ * The queue_limits are only valid as long as you have a reference
+ * count on 'md'.
+ */
+struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
+{
+       BUG_ON(!atomic_read(&md->holders));
+       return &md->queue->limits;
+}
+EXPORT_SYMBOL_GPL(dm_get_queue_limits);
+
 /*
  * Fully initialize a request-based queue (->elevator, ->request_fn, etc).
  */
@@ -2862,18 +2918,18 @@ struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, u
 
        if (type == DM_TYPE_BIO_BASED) {
                cachep = _io_cache;
-               pool_size = 16;
+               pool_size = dm_get_reserved_bio_based_ios();
                front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
        } else if (type == DM_TYPE_REQUEST_BASED) {
                cachep = _rq_tio_cache;
-               pool_size = MIN_IOS;
+               pool_size = dm_get_reserved_rq_based_ios();
                front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
                /* per_bio_data_size is not used. See __bind_mempools(). */
                WARN_ON(per_bio_data_size != 0);
        } else
                goto out;
 
-       pools->io_pool = mempool_create_slab_pool(MIN_IOS, cachep);
+       pools->io_pool = mempool_create_slab_pool(pool_size, cachep);
        if (!pools->io_pool)
                goto out;
 
@@ -2924,6 +2980,13 @@ module_exit(dm_exit);
 
 module_param(major, uint, 0);
 MODULE_PARM_DESC(major, "The major number of the device mapper");
+
+module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
+
+module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");
+
 MODULE_DESCRIPTION(DM_NAME " driver");
 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
 MODULE_LICENSE("GPL");
index 5e604cc7b4aa26c41db96c84c1fa0f306bcb698e..1d1ad7b7e527e671d1265007b25e2e04217509ab 100644 (file)
@@ -184,6 +184,9 @@ void dm_free_md_mempools(struct dm_md_mempools *pools);
 /*
  * Helpers that are used by DM core
  */
+unsigned dm_get_reserved_bio_based_ios(void);
+unsigned dm_get_reserved_rq_based_ios(void);
+
 static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
 {
        return !maxlen || strlen(result) + 1 >= maxlen;
index d0fdc134068a05ed9967fe98ca7d9a6d6429e710..f6ff711aa5bbbaa4a177dbd416ccdab53017bb23 100644 (file)
@@ -57,6 +57,7 @@ void mei_amthif_reset_params(struct mei_device *dev)
        dev->iamthif_ioctl = false;
        dev->iamthif_state = MEI_IAMTHIF_IDLE;
        dev->iamthif_timer = 0;
+       dev->iamthif_stall_timer = 0;
 }
 
 /**
index 6d0282c08a06cdbe6233a379ee575882b1d7a525..cd2033cd7120d7631fa8d345409ed9ea86259799 100644 (file)
@@ -297,10 +297,13 @@ int __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length)
 
        if (cl->reading_state != MEI_READ_COMPLETE &&
            !waitqueue_active(&cl->rx_wait)) {
+
                mutex_unlock(&dev->device_lock);
 
                if (wait_event_interruptible(cl->rx_wait,
-                               (MEI_READ_COMPLETE == cl->reading_state))) {
+                               cl->reading_state == MEI_READ_COMPLETE  ||
+                               mei_cl_is_transitioning(cl))) {
+
                        if (signal_pending(current))
                                return -EINTR;
                        return -ERESTARTSYS;
index 9eb031e920701e8ad8feadf3bec2900b182e4a5c..892cc4207fa202629e27698c0ae536f0b81880b0 100644 (file)
@@ -90,6 +90,12 @@ static inline bool mei_cl_is_connected(struct mei_cl *cl)
                cl->dev->dev_state == MEI_DEV_ENABLED &&
                cl->state == MEI_FILE_CONNECTED);
 }
+static inline bool mei_cl_is_transitioning(struct mei_cl *cl)
+{
+       return (MEI_FILE_INITIALIZING == cl->state ||
+               MEI_FILE_DISCONNECTED == cl->state ||
+               MEI_FILE_DISCONNECTING == cl->state);
+}
 
 bool mei_cl_is_other_connecting(struct mei_cl *cl);
 int mei_cl_disconnect(struct mei_cl *cl);
index 6127ab64bb399323e57e418e9742c2f5b8a9d86d..0a0448326e9d583f951932b220d90186c59f92b7 100644 (file)
@@ -35,11 +35,15 @@ static void mei_hbm_me_cl_allocate(struct mei_device *dev)
        struct mei_me_client *clients;
        int b;
 
+       dev->me_clients_num = 0;
+       dev->me_client_presentation_num = 0;
+       dev->me_client_index = 0;
+
        /* count how many ME clients we have */
        for_each_set_bit(b, dev->me_clients_map, MEI_CLIENTS_MAX)
                dev->me_clients_num++;
 
-       if (dev->me_clients_num <= 0)
+       if (dev->me_clients_num == 0)
                return;
 
        kfree(dev->me_clients);
@@ -221,7 +225,7 @@ static int mei_hbm_prop_req(struct mei_device *dev)
        struct hbm_props_request *prop_req;
        const size_t len = sizeof(struct hbm_props_request);
        unsigned long next_client_index;
-       u8 client_num;
+       unsigned long client_num;
 
 
        client_num = dev->me_client_presentation_num;
@@ -677,8 +681,6 @@ void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
                if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
                    dev->hbm_state == MEI_HBM_ENUM_CLIENTS) {
                                dev->init_clients_timer = 0;
-                               dev->me_client_presentation_num = 0;
-                               dev->me_client_index = 0;
                                mei_hbm_me_cl_allocate(dev);
                                dev->hbm_state = MEI_HBM_CLIENT_PROPERTIES;
 
index 92c73118b13c450149e21ec48b990e6329f4e5b1..6197018e2f16a24296619442fe6c8744c7c55b88 100644 (file)
@@ -175,6 +175,9 @@ void mei_reset(struct mei_device *dev, int interrupts_enabled)
                memset(&dev->wr_ext_msg, 0, sizeof(dev->wr_ext_msg));
        }
 
+       /* we're already in reset, cancel the init timer */
+       dev->init_clients_timer = 0;
+
        dev->me_clients_num = 0;
        dev->rd_msg_hdr = 0;
        dev->wd_pending = false;
index 173ff095be0dd6747145c9653726b3e00f85cd81..cabeddd66c1f406f73f7e5bed2a56b7e52cd7621 100644 (file)
@@ -249,19 +249,16 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
                mutex_unlock(&dev->device_lock);
 
                if (wait_event_interruptible(cl->rx_wait,
-                       (MEI_READ_COMPLETE == cl->reading_state ||
-                        MEI_FILE_INITIALIZING == cl->state ||
-                        MEI_FILE_DISCONNECTED == cl->state ||
-                        MEI_FILE_DISCONNECTING == cl->state))) {
+                               MEI_READ_COMPLETE == cl->reading_state ||
+                               mei_cl_is_transitioning(cl))) {
+
                        if (signal_pending(current))
                                return -EINTR;
                        return -ERESTARTSYS;
                }
 
                mutex_lock(&dev->device_lock);
-               if (MEI_FILE_INITIALIZING == cl->state ||
-                   MEI_FILE_DISCONNECTED == cl->state ||
-                   MEI_FILE_DISCONNECTING == cl->state) {
+               if (mei_cl_is_transitioning(cl)) {
                        rets = -EBUSY;
                        goto out;
                }
index 7b918b2fb89468ad6c653a3ff9e0f616aefa5022..456b322013e269fc61f911d6f5761768c2863d4f 100644 (file)
@@ -396,9 +396,9 @@ struct mei_device {
        struct mei_me_client *me_clients; /* Note: memory has to be allocated */
        DECLARE_BITMAP(me_clients_map, MEI_CLIENTS_MAX);
        DECLARE_BITMAP(host_clients_map, MEI_CLIENTS_MAX);
-       u8 me_clients_num;
-       u8 me_client_presentation_num;
-       u8 me_client_index;
+       unsigned long me_clients_num;
+       unsigned long me_client_presentation_num;
+       unsigned long me_client_index;
 
        struct mei_cl wd_cl;
        enum mei_wd_states wd_state;
index 0d8f427ade938c75052d804138cd377b02c34f99..ea3e64e22e22a7b39bafb895882d2bd47db70a05 100644 (file)
@@ -135,41 +135,6 @@ static inline struct bonding *__get_bond_by_port(struct port *port)
        return bond_get_bond_by_slave(port->slave);
 }
 
-/**
- * __get_first_port - get the first port in the bond
- * @bond: the bond we're looking at
- *
- * Return the port of the first slave in @bond, or %NULL if it can't be found.
- */
-static inline struct port *__get_first_port(struct bonding *bond)
-{
-       struct slave *first_slave = bond_first_slave(bond);
-
-       return first_slave ? &(SLAVE_AD_INFO(first_slave).port) : NULL;
-}
-
-/**
- * __get_next_port - get the next port in the bond
- * @port: the port we're looking at
- *
- * Return the port of the slave that is next in line of @port's slave in the
- * bond, or %NULL if it can't be found.
- */
-static inline struct port *__get_next_port(struct port *port)
-{
-       struct bonding *bond = __get_bond_by_port(port);
-       struct slave *slave = port->slave, *slave_next;
-
-       // If there's no bond for this port, or this is the last slave
-       if (bond == NULL)
-               return NULL;
-       slave_next = bond_next_slave(bond, slave);
-       if (!slave_next || bond_is_first_slave(bond, slave_next))
-               return NULL;
-
-       return &(SLAVE_AD_INFO(slave_next).port);
-}
-
 /**
  * __get_first_agg - get the first aggregator in the bond
  * @bond: the bond we're looking at
@@ -190,28 +155,6 @@ static inline struct aggregator *__get_first_agg(struct port *port)
        return first_slave ? &(SLAVE_AD_INFO(first_slave).aggregator) : NULL;
 }
 
-/**
- * __get_next_agg - get the next aggregator in the bond
- * @aggregator: the aggregator we're looking at
- *
- * Return the aggregator of the slave that is next in line of @aggregator's
- * slave in the bond, or %NULL if it can't be found.
- */
-static inline struct aggregator *__get_next_agg(struct aggregator *aggregator)
-{
-       struct slave *slave = aggregator->slave, *slave_next;
-       struct bonding *bond = bond_get_bond_by_slave(slave);
-
-       // If there's no bond for this aggregator, or this is the last slave
-       if (bond == NULL)
-               return NULL;
-       slave_next = bond_next_slave(bond, slave);
-       if (!slave_next || bond_is_first_slave(bond, slave_next))
-               return NULL;
-
-       return &(SLAVE_AD_INFO(slave_next).aggregator);
-}
-
 /*
  * __agg_has_partner
  *
@@ -755,16 +698,15 @@ static u32 __get_agg_bandwidth(struct aggregator *aggregator)
  */
 static struct aggregator *__get_active_agg(struct aggregator *aggregator)
 {
-       struct aggregator *retval = NULL;
+       struct bonding *bond = aggregator->slave->bond;
+       struct list_head *iter;
+       struct slave *slave;
 
-       for (; aggregator; aggregator = __get_next_agg(aggregator)) {
-               if (aggregator->is_active) {
-                       retval = aggregator;
-                       break;
-               }
-       }
+       bond_for_each_slave(bond, slave, iter)
+               if (SLAVE_AD_INFO(slave).aggregator.is_active)
+                       return &(SLAVE_AD_INFO(slave).aggregator);
 
-       return retval;
+       return NULL;
 }
 
 /**
@@ -1274,12 +1216,17 @@ static void ad_port_selection_logic(struct port *port)
 {
        struct aggregator *aggregator, *free_aggregator = NULL, *temp_aggregator;
        struct port *last_port = NULL, *curr_port;
+       struct list_head *iter;
+       struct bonding *bond;
+       struct slave *slave;
        int found = 0;
 
        // if the port is already Selected, do nothing
        if (port->sm_vars & AD_PORT_SELECTED)
                return;
 
+       bond = __get_bond_by_port(port);
+
        // if the port is connected to other aggregator, detach it
        if (port->aggregator) {
                // detach the port from its former aggregator
@@ -1320,8 +1267,8 @@ static void ad_port_selection_logic(struct port *port)
                }
        }
        // search on all aggregators for a suitable aggregator for this port
-       for (aggregator = __get_first_agg(port); aggregator;
-            aggregator = __get_next_agg(aggregator)) {
+       bond_for_each_slave(bond, slave, iter) {
+               aggregator = &(SLAVE_AD_INFO(slave).aggregator);
 
                // keep a free aggregator for later use(if needed)
                if (!aggregator->lag_ports) {
@@ -1515,19 +1462,23 @@ static int agg_device_up(const struct aggregator *agg)
 static void ad_agg_selection_logic(struct aggregator *agg)
 {
        struct aggregator *best, *active, *origin;
+       struct bonding *bond = agg->slave->bond;
+       struct list_head *iter;
+       struct slave *slave;
        struct port *port;
 
        origin = agg;
        active = __get_active_agg(agg);
        best = (active && agg_device_up(active)) ? active : NULL;
 
-       do {
+       bond_for_each_slave(bond, slave, iter) {
+               agg = &(SLAVE_AD_INFO(slave).aggregator);
+
                agg->is_active = 0;
 
                if (agg->num_of_ports && agg_device_up(agg))
                        best = ad_agg_selection_test(best, agg);
-
-       } while ((agg = __get_next_agg(agg)));
+       }
 
        if (best &&
            __get_agg_selection_mode(best->lag_ports) == BOND_AD_STABLE) {
@@ -1565,8 +1516,8 @@ static void ad_agg_selection_logic(struct aggregator *agg)
                         best->lag_ports, best->slave,
                         best->slave ? best->slave->dev->name : "NULL");
 
-               for (agg = __get_first_agg(best->lag_ports); agg;
-                    agg = __get_next_agg(agg)) {
+               bond_for_each_slave(bond, slave, iter) {
+                       agg = &(SLAVE_AD_INFO(slave).aggregator);
 
                        pr_debug("Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
                                 agg->aggregator_identifier, agg->num_of_ports,
@@ -1614,13 +1565,7 @@ static void ad_agg_selection_logic(struct aggregator *agg)
                }
        }
 
-       if (origin->slave) {
-               struct bonding *bond;
-
-               bond = bond_get_bond_by_slave(origin->slave);
-               if (bond)
-                       bond_3ad_set_carrier(bond);
-       }
+       bond_3ad_set_carrier(bond);
 }
 
 /**
@@ -1969,6 +1914,9 @@ void bond_3ad_unbind_slave(struct slave *slave)
        struct port *port, *prev_port, *temp_port;
        struct aggregator *aggregator, *new_aggregator, *temp_aggregator;
        int select_new_active_agg = 0;
+       struct bonding *bond = slave->bond;
+       struct slave *slave_iter;
+       struct list_head *iter;
 
        // find the aggregator related to this slave
        aggregator = &(SLAVE_AD_INFO(slave).aggregator);
@@ -1998,14 +1946,16 @@ void bond_3ad_unbind_slave(struct slave *slave)
                // reason to search for new aggregator, and that we will find one
                if ((aggregator->lag_ports != port) || (aggregator->lag_ports->next_port_in_aggregator)) {
                        // find new aggregator for the related port(s)
-                       new_aggregator = __get_first_agg(port);
-                       for (; new_aggregator; new_aggregator = __get_next_agg(new_aggregator)) {
+                       bond_for_each_slave(bond, slave_iter, iter) {
+                               new_aggregator = &(SLAVE_AD_INFO(slave_iter).aggregator);
                                // if the new aggregator is empty, or it is connected to our port only
                                if (!new_aggregator->lag_ports
                                    || ((new_aggregator->lag_ports == port)
                                        && !new_aggregator->lag_ports->next_port_in_aggregator))
                                        break;
                        }
+                       if (!slave_iter)
+                               new_aggregator = NULL;
                        // if new aggregator found, copy the aggregator's parameters
                        // and connect the related lag_ports to the new aggregator
                        if ((new_aggregator) && ((!new_aggregator->lag_ports) || ((new_aggregator->lag_ports == port) && !new_aggregator->lag_ports->next_port_in_aggregator))) {
@@ -2056,15 +2006,17 @@ void bond_3ad_unbind_slave(struct slave *slave)
                                pr_info("%s: Removing an active aggregator\n",
                                        slave->bond->dev->name);
                                // select new active aggregator
-                               ad_agg_selection_logic(__get_first_agg(port));
+                               temp_aggregator = __get_first_agg(port);
+                               if (temp_aggregator)
+                                       ad_agg_selection_logic(temp_aggregator);
                        }
                }
        }
 
        pr_debug("Unbinding port %d\n", port->actor_port_number);
        // find the aggregator that this port is connected to
-       temp_aggregator = __get_first_agg(port);
-       for (; temp_aggregator; temp_aggregator = __get_next_agg(temp_aggregator)) {
+       bond_for_each_slave(bond, slave_iter, iter) {
+               temp_aggregator = &(SLAVE_AD_INFO(slave_iter).aggregator);
                prev_port = NULL;
                // search the port in the aggregator's related ports
                for (temp_port = temp_aggregator->lag_ports; temp_port;
@@ -2111,19 +2063,24 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
 {
        struct bonding *bond = container_of(work, struct bonding,
                                            ad_work.work);
-       struct port *port;
        struct aggregator *aggregator;
+       struct list_head *iter;
+       struct slave *slave;
+       struct port *port;
 
        read_lock(&bond->lock);
 
        //check if there are any slaves
-       if (list_empty(&bond->slave_list))
+       if (!bond_has_slaves(bond))
                goto re_arm;
 
        // check if agg_select_timer timer after initialize is timed out
        if (BOND_AD_INFO(bond).agg_select_timer && !(--BOND_AD_INFO(bond).agg_select_timer)) {
+               slave = bond_first_slave(bond);
+               port = slave ? &(SLAVE_AD_INFO(slave).port) : NULL;
+
                // select the active aggregator for the bond
-               if ((port = __get_first_port(bond))) {
+               if (port) {
                        if (!port->slave) {
                                pr_warning("%s: Warning: bond's first port is uninitialized\n",
                                           bond->dev->name);
@@ -2137,7 +2094,8 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
        }
 
        // for each port run the state machines
-       for (port = __get_first_port(bond); port; port = __get_next_port(port)) {
+       bond_for_each_slave(bond, slave, iter) {
+               port = &(SLAVE_AD_INFO(slave).port);
                if (!port->slave) {
                        pr_warning("%s: Warning: Found an uninitialized port\n",
                                   bond->dev->name);
@@ -2382,9 +2340,12 @@ int __bond_3ad_get_active_agg_info(struct bonding *bond,
                                   struct ad_info *ad_info)
 {
        struct aggregator *aggregator = NULL;
+       struct list_head *iter;
+       struct slave *slave;
        struct port *port;
 
-       for (port = __get_first_port(bond); port; port = __get_next_port(port)) {
+       bond_for_each_slave(bond, slave, iter) {
+               port = &(SLAVE_AD_INFO(slave).port);
                if (port->aggregator && port->aggregator->is_active) {
                        aggregator = port->aggregator;
                        break;
@@ -2417,14 +2378,15 @@ int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info)
 
 int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
 {
-       struct slave *slave, *start_at;
        struct bonding *bond = netdev_priv(dev);
-       int slave_agg_no;
-       int slaves_in_agg;
-       int agg_id;
-       int i;
+       struct slave *slave, *first_ok_slave;
+       struct aggregator *agg;
        struct ad_info ad_info;
+       struct list_head *iter;
+       int slaves_in_agg;
+       int slave_agg_no;
        int res = 1;
+       int agg_id;
 
        read_lock(&bond->lock);
        if (__bond_3ad_get_active_agg_info(bond, &ad_info)) {
@@ -2437,20 +2399,28 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
        agg_id = ad_info.aggregator_id;
 
        if (slaves_in_agg == 0) {
-               /*the aggregator is empty*/
                pr_debug("%s: Error: active aggregator is empty\n", dev->name);
                goto out;
        }
 
-       slave_agg_no = bond->xmit_hash_policy(skb, slaves_in_agg);
+       slave_agg_no = bond_xmit_hash(bond, skb, slaves_in_agg);
+       first_ok_slave = NULL;
 
-       bond_for_each_slave(bond, slave) {
-               struct aggregator *agg = SLAVE_AD_INFO(slave).port.aggregator;
+       bond_for_each_slave(bond, slave, iter) {
+               agg = SLAVE_AD_INFO(slave).port.aggregator;
+               if (!agg || agg->aggregator_identifier != agg_id)
+                       continue;
 
-               if (agg && (agg->aggregator_identifier == agg_id)) {
+               if (slave_agg_no >= 0) {
+                       if (!first_ok_slave && SLAVE_IS_OK(slave))
+                               first_ok_slave = slave;
                        slave_agg_no--;
-                       if (slave_agg_no < 0)
-                               break;
+                       continue;
+               }
+
+               if (SLAVE_IS_OK(slave)) {
+                       res = bond_dev_queue_xmit(bond, skb, slave->dev);
+                       goto out;
                }
        }
 
@@ -2460,20 +2430,10 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
                goto out;
        }
 
-       start_at = slave;
-
-       bond_for_each_slave_from(bond, slave, i, start_at) {
-               int slave_agg_id = 0;
-               struct aggregator *agg = SLAVE_AD_INFO(slave).port.aggregator;
-
-               if (agg)
-                       slave_agg_id = agg->aggregator_identifier;
-
-               if (SLAVE_IS_OK(slave) && agg && (slave_agg_id == agg_id)) {
-                       res = bond_dev_queue_xmit(bond, skb, slave->dev);
-                       break;
-               }
-       }
+       /* we couldn't find any suitable slave after the agg_no, so use the
+        * first suitable one found, if any. */
+       if (first_ok_slave)
+               res = bond_dev_queue_xmit(bond, skb, first_ok_slave->dev);
 
 out:
        read_unlock(&bond->lock);
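
The rewritten bond_3ad_xmit_xor() above folds the old two-loop scheme into one walk over the slaves of the active aggregator: it counts the hash-selected index down, remembers the first usable slave seen on the way as a fallback, transmits on the first usable slave past the hashed position, and uses the fallback when nothing usable follows. The snippet below is only a small sketch of that selection over a plain array; pick_slave(), struct fake_slave and slave_ok() are stand-ins for the bond's slave list and SLAVE_IS_OK().

    /* Sketch of the single-pass slave selection in bond_3ad_xmit_xor(). */
    #include <stdbool.h>
    #include <stdio.h>

    struct fake_slave { int id; bool ok; };

    static bool slave_ok(const struct fake_slave *s) { return s->ok; }

    static const struct fake_slave *pick_slave(const struct fake_slave *slaves,
                                               int nr, int slave_agg_no)
    {
        const struct fake_slave *first_ok_slave = NULL;

        for (int i = 0; i < nr; i++) {
            const struct fake_slave *s = &slaves[i];

            if (slave_agg_no >= 0) {
                /* up to and including the hashed position: keep a fallback */
                if (!first_ok_slave && slave_ok(s))
                    first_ok_slave = s;
                slave_agg_no--;
                continue;
            }
            /* past the hashed position: the first usable slave wins */
            if (slave_ok(s))
                return s;
        }
        /* nothing usable past the hashed position: use the fallback */
        return first_ok_slave;
    }

    int main(void)
    {
        struct fake_slave slaves[] = {
            { 0, true }, { 1, false }, { 2, true }, { 3, false },
        };
        const struct fake_slave *s = pick_slave(slaves, 4, 2);

        printf("selected slave %d\n", s ? s->id : -1);
        return 0;
    }
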
@@ -2515,11 +2475,12 @@ int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
 void bond_3ad_update_lacp_rate(struct bonding *bond)
 {
        struct port *port = NULL;
+       struct list_head *iter;
        struct slave *slave;
        int lacp_fast;
 
        lacp_fast = bond->params.lacp_fast;
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                port = &(SLAVE_AD_INFO(slave).port);
                __get_state_machine_lock(port);
                if (lacp_fast)
index f428ef57437279ec4bbf15e1c7e8e9b6a9da7a2c..e96041816b5b46eb6cb387ab0e5996530e98d0d0 100644 (file)
@@ -223,13 +223,14 @@ static long long compute_gap(struct slave *slave)
 static struct slave *tlb_get_least_loaded_slave(struct bonding *bond)
 {
        struct slave *slave, *least_loaded;
+       struct list_head *iter;
        long long max_gap;
 
        least_loaded = NULL;
        max_gap = LLONG_MIN;
 
        /* Find the slave with the largest gap */
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                if (SLAVE_IS_OK(slave)) {
                        long long gap = compute_gap(slave);
 
@@ -382,30 +383,31 @@ out:
 static struct slave *rlb_next_rx_slave(struct bonding *bond)
 {
        struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
-       struct slave *rx_slave, *slave, *start_at;
-       int i = 0;
-
-       if (bond_info->next_rx_slave)
-               start_at = bond_info->next_rx_slave;
-       else
-               start_at = bond_first_slave(bond);
-
-       rx_slave = NULL;
+       struct slave *before = NULL, *rx_slave = NULL, *slave;
+       struct list_head *iter;
+       bool found = false;
 
-       bond_for_each_slave_from(bond, slave, i, start_at) {
-               if (SLAVE_IS_OK(slave)) {
-                       if (!rx_slave) {
-                               rx_slave = slave;
-                       } else if (slave->speed > rx_slave->speed) {
+       bond_for_each_slave(bond, slave, iter) {
+               if (!SLAVE_IS_OK(slave))
+                       continue;
+               if (!found) {
+                       if (!before || before->speed < slave->speed)
+                               before = slave;
+               } else {
+                       if (!rx_slave || rx_slave->speed < slave->speed)
                                rx_slave = slave;
-                       }
                }
+               if (slave == bond_info->rx_slave)
+                       found = true;
        }
+       /* we didn't find anything after the current slave, or we have
+        * something better before and up to the current slave
+        */
+       if (!rx_slave || (before && rx_slave->speed < before->speed))
+               rx_slave = before;
 
-       if (rx_slave) {
-               slave = bond_next_slave(bond, rx_slave);
-               bond_info->next_rx_slave = slave;
-       }
+       if (rx_slave)
+               bond_info->rx_slave = rx_slave;
 
        return rx_slave;
 }
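
rlb_next_rx_slave() above now walks the slave list once: until the currently assigned rx_slave is reached it tracks the fastest usable slave as a fallback ("before"), afterwards it tracks the fastest usable slave that follows, and the fallback wins when nothing usable follows the current slave or the fallback is strictly faster. A minimal sketch of that wrap-around pick follows; next_rx_slave(), struct fake_slave and slave_ok() are stand-ins for the kernel structures and SLAVE_IS_OK().

    /* Sketch of the wrap-around pick in the reworked rlb_next_rx_slave(). */
    #include <stdbool.h>
    #include <stdio.h>

    struct fake_slave { int id; int speed; bool ok; };

    static bool slave_ok(const struct fake_slave *s) { return s->ok; }

    static const struct fake_slave *
    next_rx_slave(const struct fake_slave *slaves, int nr,
                  const struct fake_slave *cur)
    {
        const struct fake_slave *before = NULL, *rx_slave = NULL;
        bool found = false;

        for (int i = 0; i < nr; i++) {
            const struct fake_slave *s = &slaves[i];

            if (!slave_ok(s))
                continue;
            if (!found) {           /* up to and including the current slave */
                if (!before || before->speed < s->speed)
                    before = s;
            } else {                /* after the current slave */
                if (!rx_slave || rx_slave->speed < s->speed)
                    rx_slave = s;
            }
            if (s == cur)
                found = true;
        }
        /* nothing usable after the current slave, or something faster before */
        if (!rx_slave || (before && rx_slave->speed < before->speed))
            rx_slave = before;

        return rx_slave;
    }

    int main(void)
    {
        struct fake_slave slaves[] = {
            { 0, 1000, true }, { 1, 100, true }, { 2, 1000, false },
        };
        const struct fake_slave *s = next_rx_slave(slaves, 3, &slaves[1]);

        printf("next rx slave: %d\n", s ? s->id : -1);
        return 0;
    }
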
@@ -1019,7 +1021,7 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[])
 
        /* loop through vlans and send one packet for each */
        rcu_read_lock();
-       netdev_for_each_upper_dev_rcu(bond->dev, upper, iter) {
+       netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
                if (upper->priv_flags & IFF_802_1Q_VLAN)
                        alb_send_lp_vid(slave, mac_addr,
                                        vlan_dev_vlan_id(upper));
@@ -1172,10 +1174,11 @@ static void alb_change_hw_addr_on_detach(struct bonding *bond, struct slave *sla
  */
 static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slave *slave)
 {
-       struct slave *tmp_slave1, *free_mac_slave = NULL;
        struct slave *has_bond_addr = bond->curr_active_slave;
+       struct slave *tmp_slave1, *free_mac_slave = NULL;
+       struct list_head *iter;
 
-       if (list_empty(&bond->slave_list)) {
+       if (!bond_has_slaves(bond)) {
                /* this is the first slave */
                return 0;
        }
@@ -1196,7 +1199,7 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
        /* The slave's address is equal to the address of the bond.
         * Search for a spare address in the bond for this slave.
         */
-       bond_for_each_slave(bond, tmp_slave1) {
+       bond_for_each_slave(bond, tmp_slave1, iter) {
                if (!bond_slave_has_mac(bond, tmp_slave1->perm_hwaddr)) {
                        /* no slave has tmp_slave1's perm addr
                         * as its curr addr
@@ -1246,15 +1249,16 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
  */
 static int alb_set_mac_address(struct bonding *bond, void *addr)
 {
-       char tmp_addr[ETH_ALEN];
-       struct slave *slave;
+       struct slave *slave, *rollback_slave;
+       struct list_head *iter;
        struct sockaddr sa;
+       char tmp_addr[ETH_ALEN];
        int res;
 
        if (bond->alb_info.rlb_enabled)
                return 0;
 
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                /* save net_device's current hw address */
                memcpy(tmp_addr, slave->dev->dev_addr, ETH_ALEN);
 
@@ -1274,10 +1278,12 @@ unwind:
        sa.sa_family = bond->dev->type;
 
        /* unwind from head to the slave that failed */
-       bond_for_each_slave_continue_reverse(bond, slave) {
-               memcpy(tmp_addr, slave->dev->dev_addr, ETH_ALEN);
-               dev_set_mac_address(slave->dev, &sa);
-               memcpy(slave->dev->dev_addr, tmp_addr, ETH_ALEN);
+       bond_for_each_slave(bond, rollback_slave, iter) {
+               if (rollback_slave == slave)
+                       break;
+               memcpy(tmp_addr, rollback_slave->dev->dev_addr, ETH_ALEN);
+               dev_set_mac_address(rollback_slave->dev, &sa);
+               memcpy(rollback_slave->dev->dev_addr, tmp_addr, ETH_ALEN);
        }
 
        return res;
@@ -1458,11 +1464,12 @@ void bond_alb_monitor(struct work_struct *work)
        struct bonding *bond = container_of(work, struct bonding,
                                            alb_work.work);
        struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
+       struct list_head *iter;
        struct slave *slave;
 
        read_lock(&bond->lock);
 
-       if (list_empty(&bond->slave_list)) {
+       if (!bond_has_slaves(bond)) {
                bond_info->tx_rebalance_counter = 0;
                bond_info->lp_counter = 0;
                goto re_arm;
@@ -1480,7 +1487,7 @@ void bond_alb_monitor(struct work_struct *work)
                 */
                read_lock(&bond->curr_slave_lock);
 
-               bond_for_each_slave(bond, slave)
+               bond_for_each_slave(bond, slave, iter)
                        alb_send_learning_packets(slave, slave->dev->dev_addr);
 
                read_unlock(&bond->curr_slave_lock);
@@ -1493,7 +1500,7 @@ void bond_alb_monitor(struct work_struct *work)
 
                read_lock(&bond->curr_slave_lock);
 
-               bond_for_each_slave(bond, slave) {
+               bond_for_each_slave(bond, slave, iter) {
                        tlb_clear_slave(bond, slave, 1);
                        if (slave == bond->curr_active_slave) {
                                SLAVE_TLB_INFO(slave).load =
@@ -1599,13 +1606,13 @@ int bond_alb_init_slave(struct bonding *bond, struct slave *slave)
  */
 void bond_alb_deinit_slave(struct bonding *bond, struct slave *slave)
 {
-       if (!list_empty(&bond->slave_list))
+       if (bond_has_slaves(bond))
                alb_change_hw_addr_on_detach(bond, slave);
 
        tlb_clear_slave(bond, slave, 0);
 
        if (bond->alb_info.rlb_enabled) {
-               bond->alb_info.next_rx_slave = NULL;
+               bond->alb_info.rx_slave = NULL;
                rlb_clear_slave(bond, slave);
        }
 }
@@ -1669,7 +1676,7 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
        swap_slave = bond->curr_active_slave;
        rcu_assign_pointer(bond->curr_active_slave, new_slave);
 
-       if (!new_slave || list_empty(&bond->slave_list))
+       if (!new_slave || !bond_has_slaves(bond))
                return;
 
        /* set the new curr_active_slave to the bonds mac address
index c5eff5dafdfeab12ee4849fe75d9117d70c4859f..4226044efd083645db9229c0f88e507eae6d410b 100644 (file)
@@ -154,9 +154,7 @@ struct alb_bond_info {
        u8                      rx_ntt; /* flag - need to transmit
                                         * to all rx clients
                                         */
-       struct slave            *next_rx_slave;/* next slave to be assigned
-                                               * to a new rx client for
-                                               */
+       struct slave            *rx_slave;/* last slave to xmit from */
        u8                      primary_is_promisc;        /* boolean */
        u32                     rlb_promisc_timeout_counter;/* counts primary
                                                             * promiscuity time
index 55bbb8b8200c5bbd6949ab255015b3a23a174fc3..dfb4f6dd5de0603a5183b7d1f8addc5794b0a96e 100644 (file)
@@ -78,6 +78,7 @@
 #include <net/netns/generic.h>
 #include <net/pkt_sched.h>
 #include <linux/rculist.h>
+#include <net/flow_keys.h>
 #include "bonding.h"
 #include "bond_3ad.h"
 #include "bond_alb.h"
@@ -159,7 +160,8 @@ MODULE_PARM_DESC(min_links, "Minimum number of available links before turning on
 module_param(xmit_hash_policy, charp, 0);
 MODULE_PARM_DESC(xmit_hash_policy, "balance-xor and 802.3ad hashing method; "
                                   "0 for layer 2 (default), 1 for layer 3+4, "
-                                  "2 for layer 2+3");
+                                  "2 for layer 2+3, 3 for encap layer 2+3, "
+                                  "4 for encap layer 3+4");
 module_param(arp_interval, int, 0);
 MODULE_PARM_DESC(arp_interval, "arp interval in milliseconds");
 module_param_array(arp_ip_target, charp, NULL, 0);
@@ -217,6 +219,8 @@ const struct bond_parm_tbl xmit_hashtype_tbl[] = {
 {      "layer2",               BOND_XMIT_POLICY_LAYER2},
 {      "layer3+4",             BOND_XMIT_POLICY_LAYER34},
 {      "layer2+3",             BOND_XMIT_POLICY_LAYER23},
+{      "encap2+3",             BOND_XMIT_POLICY_ENCAP23},
+{      "encap3+4",             BOND_XMIT_POLICY_ENCAP34},
 {      NULL,                   -1},
 };
 
@@ -332,10 +336,11 @@ static int bond_vlan_rx_add_vid(struct net_device *bond_dev,
                                __be16 proto, u16 vid)
 {
        struct bonding *bond = netdev_priv(bond_dev);
-       struct slave *slave;
+       struct slave *slave, *rollback_slave;
+       struct list_head *iter;
        int res;
 
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                res = vlan_vid_add(slave->dev, proto, vid);
                if (res)
                        goto unwind;
@@ -344,9 +349,13 @@ static int bond_vlan_rx_add_vid(struct net_device *bond_dev,
        return 0;
 
 unwind:
-       /* unwind from the slave that failed */
-       bond_for_each_slave_continue_reverse(bond, slave)
-               vlan_vid_del(slave->dev, proto, vid);
+       /* unwind to the slave that failed */
+       bond_for_each_slave(bond, rollback_slave, iter) {
+               if (rollback_slave == slave)
+                       break;
+
+               vlan_vid_del(rollback_slave->dev, proto, vid);
+       }
 
        return res;
 }
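
Because the list_head based bond_for_each_slave() has no reverse-continue variant, the error path above unwinds by walking the slave list from the head again and stopping at the slave that failed. Below is a minimal sketch of that "unwind to the point of failure" pattern; add_to_all(), do_add() and do_del() are placeholders for vlan_vid_add()/vlan_vid_del() over the real slave list.

    /* Sketch of the forward-walking rollback used in bond_vlan_rx_add_vid():
     * apply an operation to each slave, and on failure undo it for every
     * slave that precedes the failing one.
     */
    #include <stdio.h>

    #define NR_SLAVES 4

    static int do_add(int slave)
    {
        printf("add on slave %d\n", slave);
        return slave == 2 ? -1 : 0;     /* pretend slave 2 fails */
    }

    static void do_del(int slave)
    {
        printf("del on slave %d\n", slave);
    }

    static int add_to_all(const int *slaves, int nr)
    {
        int i, failed = -1;

        for (i = 0; i < nr; i++) {
            if (do_add(slaves[i]) < 0) {
                failed = i;
                break;
            }
        }
        if (failed < 0)
            return 0;

        /* unwind to the slave that failed: walk from the head, stop there */
        for (i = 0; i < failed; i++)
            do_del(slaves[i]);

        return -1;
    }

    int main(void)
    {
        int slaves[NR_SLAVES] = { 0, 1, 2, 3 };

        return add_to_all(slaves, NR_SLAVES) ? 1 : 0;
    }
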
@@ -360,9 +369,10 @@ static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
                                 __be16 proto, u16 vid)
 {
        struct bonding *bond = netdev_priv(bond_dev);
+       struct list_head *iter;
        struct slave *slave;
 
-       bond_for_each_slave(bond, slave)
+       bond_for_each_slave(bond, slave, iter)
                vlan_vid_del(slave->dev, proto, vid);
 
        if (bond_is_lb(bond))
@@ -382,15 +392,16 @@ static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
  */
 static int bond_set_carrier(struct bonding *bond)
 {
+       struct list_head *iter;
        struct slave *slave;
 
-       if (list_empty(&bond->slave_list))
+       if (!bond_has_slaves(bond))
                goto down;
 
        if (bond->params.mode == BOND_MODE_8023AD)
                return bond_3ad_set_carrier(bond);
 
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                if (slave->link == BOND_LINK_UP) {
                        if (!netif_carrier_ok(bond->dev)) {
                                netif_carrier_on(bond->dev);
@@ -522,7 +533,9 @@ static int bond_check_dev_link(struct bonding *bond,
  */
 static int bond_set_promiscuity(struct bonding *bond, int inc)
 {
+       struct list_head *iter;
        int err = 0;
+
        if (USES_PRIMARY(bond->params.mode)) {
                /* write lock already acquired */
                if (bond->curr_active_slave) {
@@ -532,7 +545,7 @@ static int bond_set_promiscuity(struct bonding *bond, int inc)
        } else {
                struct slave *slave;
 
-               bond_for_each_slave(bond, slave) {
+               bond_for_each_slave(bond, slave, iter) {
                        err = dev_set_promiscuity(slave->dev, inc);
                        if (err)
                                return err;
@@ -546,7 +559,9 @@ static int bond_set_promiscuity(struct bonding *bond, int inc)
  */
 static int bond_set_allmulti(struct bonding *bond, int inc)
 {
+       struct list_head *iter;
        int err = 0;
+
        if (USES_PRIMARY(bond->params.mode)) {
                /* write lock already acquired */
                if (bond->curr_active_slave) {
@@ -556,7 +571,7 @@ static int bond_set_allmulti(struct bonding *bond, int inc)
        } else {
                struct slave *slave;
 
-               bond_for_each_slave(bond, slave) {
+               bond_for_each_slave(bond, slave, iter) {
                        err = dev_set_allmulti(slave->dev, inc);
                        if (err)
                                return err;
@@ -774,43 +789,24 @@ static bool bond_should_change_active(struct bonding *bond)
 /**
  * find_best_interface - select the best available slave to be the active one
  * @bond: our bonding struct
- *
- * Warning: Caller must hold curr_slave_lock for writing.
  */
 static struct slave *bond_find_best_slave(struct bonding *bond)
 {
-       struct slave *new_active, *old_active;
-       struct slave *bestslave = NULL;
+       struct slave *slave, *bestslave = NULL;
+       struct list_head *iter;
        int mintime = bond->params.updelay;
-       int i;
-
-       new_active = bond->curr_active_slave;
-
-       if (!new_active) { /* there were no active slaves left */
-               new_active = bond_first_slave(bond);
-               if (!new_active)
-                       return NULL; /* still no slave, return NULL */
-       }
 
-       if ((bond->primary_slave) &&
-           bond->primary_slave->link == BOND_LINK_UP &&
-           bond_should_change_active(bond)) {
-               new_active = bond->primary_slave;
-       }
-
-       /* remember where to stop iterating over the slaves */
-       old_active = new_active;
-
-       bond_for_each_slave_from(bond, new_active, i, old_active) {
-               if (new_active->link == BOND_LINK_UP) {
-                       return new_active;
-               } else if (new_active->link == BOND_LINK_BACK &&
-                          IS_UP(new_active->dev)) {
-                       /* link up, but waiting for stabilization */
-                       if (new_active->delay < mintime) {
-                               mintime = new_active->delay;
-                               bestslave = new_active;
-                       }
+       if (bond->primary_slave && bond->primary_slave->link == BOND_LINK_UP &&
+           bond_should_change_active(bond))
+               return bond->primary_slave;
+
+       bond_for_each_slave(bond, slave, iter) {
+               if (slave->link == BOND_LINK_UP)
+                       return slave;
+               if (slave->link == BOND_LINK_BACK && IS_UP(slave->dev) &&
+                   slave->delay < mintime) {
+                       mintime = slave->delay;
+                       bestslave = slave;
                }
        }
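
The simplified bond_find_best_slave() above prefers the primary slave when it is up and allowed to take over, otherwise returns the first slave whose link is up, and otherwise the BOND_LINK_BACK slave with the shortest remaining updelay. The snippet below sketches only the loop part of that selection (the primary-slave shortcut is left out); find_best_slave(), struct fake_slave and its fields are stand-ins for the kernel types.

    /* Sketch of the slave scan in the simplified bond_find_best_slave(). */
    #include <stdbool.h>
    #include <stdio.h>

    enum link_state { LINK_DOWN, LINK_UP, LINK_BACK };

    struct fake_slave { int id; enum link_state link; int delay; bool dev_up; };

    static const struct fake_slave *
    find_best_slave(const struct fake_slave *slaves, int nr, int updelay)
    {
        const struct fake_slave *bestslave = NULL;
        int mintime = updelay;

        for (int i = 0; i < nr; i++) {
            const struct fake_slave *s = &slaves[i];

            if (s->link == LINK_UP)
                return s;               /* first slave with link up wins */
            if (s->link == LINK_BACK && s->dev_up && s->delay < mintime) {
                mintime = s->delay;     /* still waiting out updelay */
                bestslave = s;
            }
        }
        return bestslave;               /* may be NULL if nothing is usable */
    }

    int main(void)
    {
        struct fake_slave slaves[] = {
            { 0, LINK_DOWN, 0, true },
            { 1, LINK_BACK, 3, true },
            { 2, LINK_BACK, 1, true },
        };
        const struct fake_slave *s = find_best_slave(slaves, 3, 10);

        printf("best slave: %d\n", s ? s->id : -1);
        return 0;
    }
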
 
@@ -980,7 +976,6 @@ void bond_select_active_slave(struct bonding *bond)
  */
 static void bond_attach_slave(struct bonding *bond, struct slave *new_slave)
 {
-       list_add_tail_rcu(&new_slave->list, &bond->slave_list);
        bond->slave_cnt++;
 }
 
@@ -996,7 +991,6 @@ static void bond_attach_slave(struct bonding *bond, struct slave *new_slave)
  */
 static void bond_detach_slave(struct bonding *bond, struct slave *slave)
 {
-       list_del_rcu(&slave->list);
        bond->slave_cnt--;
 }
 
@@ -1046,9 +1040,10 @@ static void bond_poll_controller(struct net_device *bond_dev)
 static void bond_netpoll_cleanup(struct net_device *bond_dev)
 {
        struct bonding *bond = netdev_priv(bond_dev);
+       struct list_head *iter;
        struct slave *slave;
 
-       bond_for_each_slave(bond, slave)
+       bond_for_each_slave(bond, slave, iter)
                if (IS_UP(slave->dev))
                        slave_disable_netpoll(slave);
 }
@@ -1056,10 +1051,11 @@ static void bond_netpoll_cleanup(struct net_device *bond_dev)
 static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni, gfp_t gfp)
 {
        struct bonding *bond = netdev_priv(dev);
+       struct list_head *iter;
        struct slave *slave;
        int err = 0;
 
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                err = slave_enable_netpoll(slave);
                if (err) {
                        bond_netpoll_cleanup(dev);
@@ -1087,10 +1083,11 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
                                           netdev_features_t features)
 {
        struct bonding *bond = netdev_priv(dev);
+       struct list_head *iter;
        netdev_features_t mask;
        struct slave *slave;
 
-       if (list_empty(&bond->slave_list)) {
+       if (!bond_has_slaves(bond)) {
                /* Disable adding VLANs to empty bond. But why? --mq */
                features |= NETIF_F_VLAN_CHALLENGED;
                return features;
@@ -1100,7 +1097,7 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
        features &= ~NETIF_F_ONE_FOR_ALL;
        features |= NETIF_F_ALL_FOR_ALL;
 
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                features = netdev_increment_features(features,
                                                     slave->dev->features,
                                                     mask);
@@ -1118,16 +1115,17 @@ static void bond_compute_features(struct bonding *bond)
 {
        unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE;
        netdev_features_t vlan_features = BOND_VLAN_FEATURES;
+       struct net_device *bond_dev = bond->dev;
+       struct list_head *iter;
+       struct slave *slave;
        unsigned short max_hard_header_len = ETH_HLEN;
        unsigned int gso_max_size = GSO_MAX_SIZE;
-       struct net_device *bond_dev = bond->dev;
        u16 gso_max_segs = GSO_MAX_SEGS;
-       struct slave *slave;
 
-       if (list_empty(&bond->slave_list))
+       if (!bond_has_slaves(bond))
                goto done;
 
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                vlan_features = netdev_increment_features(vlan_features,
                        slave->dev->vlan_features, BOND_VLAN_FEATURES);
 
@@ -1233,11 +1231,12 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
 }
 
 static int bond_master_upper_dev_link(struct net_device *bond_dev,
-                                     struct net_device *slave_dev)
+                                     struct net_device *slave_dev,
+                                     struct slave *slave)
 {
        int err;
 
-       err = netdev_master_upper_dev_link(slave_dev, bond_dev);
+       err = netdev_master_upper_dev_link_private(slave_dev, bond_dev, slave);
        if (err)
                return err;
        slave_dev->flags |= IFF_SLAVE;
@@ -1258,7 +1257,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 {
        struct bonding *bond = netdev_priv(bond_dev);
        const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
-       struct slave *new_slave = NULL;
+       struct slave *new_slave = NULL, *prev_slave;
        struct sockaddr addr;
        int link_reporting;
        int res = 0, i;
@@ -1313,7 +1312,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
         * bond ether type mutual exclusion - don't allow slaves of dissimilar
         * ether type (eg ARPHRD_ETHER and ARPHRD_INFINIBAND) share the same bond
         */
-       if (list_empty(&bond->slave_list)) {
+       if (!bond_has_slaves(bond)) {
                if (bond_dev->type != slave_dev->type) {
                        pr_debug("%s: change device type from %d to %d\n",
                                 bond_dev->name,
@@ -1352,7 +1351,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
        }
 
        if (slave_ops->ndo_set_mac_address == NULL) {
-               if (list_empty(&bond->slave_list)) {
+               if (!bond_has_slaves(bond)) {
                        pr_warning("%s: Warning: The first slave device specified does not support setting the MAC address. Setting fail_over_mac to active.",
                                   bond_dev->name);
                        bond->params.fail_over_mac = BOND_FOM_ACTIVE;
@@ -1368,7 +1367,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 
        /* If this is the first slave, then we need to set the master's hardware
         * address to be the same as the slave's. */
-       if (list_empty(&bond->slave_list) &&
+       if (!bond_has_slaves(bond) &&
            bond->dev->addr_assign_type == NET_ADDR_RANDOM)
                bond_set_dev_addr(bond->dev, slave_dev);
 
@@ -1377,7 +1376,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                res = -ENOMEM;
                goto err_undo_flags;
        }
-       INIT_LIST_HEAD(&new_slave->list);
        /*
         * Set the new_slave's queue_id to be zero.  Queue ID mapping
         * is set via sysfs or module option if desired.
@@ -1413,17 +1411,11 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                }
        }
 
-       res = bond_master_upper_dev_link(bond_dev, slave_dev);
-       if (res) {
-               pr_debug("Error %d calling bond_master_upper_dev_link\n", res);
-               goto err_restore_mac;
-       }
-
        /* open the slave since the application closed it */
        res = dev_open(slave_dev);
        if (res) {
                pr_debug("Opening slave %s failed\n", slave_dev->name);
-               goto err_unset_master;
+               goto err_restore_mac;
        }
 
        new_slave->bond = bond;
@@ -1481,6 +1473,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 
        write_lock_bh(&bond->lock);
 
+       prev_slave = bond_last_slave(bond);
        bond_attach_slave(bond, new_slave);
 
        new_slave->delay = 0;
@@ -1568,16 +1561,13 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                 */
                bond_set_slave_inactive_flags(new_slave);
                /* if this is the first slave */
-               if (bond_first_slave(bond) == new_slave) {
+               if (!prev_slave) {
                        SLAVE_AD_INFO(new_slave).id = 1;
                        /* Initialize AD with the number of times that the AD timer is called in 1 second
                         * can be called only after the mac address of the bond is set
                         */
                        bond_3ad_initialize(bond, 1000/AD_TIMER_INTERVAL);
                } else {
-                       struct slave *prev_slave;
-
-                       prev_slave = bond_prev_slave(bond, new_slave);
                        SLAVE_AD_INFO(new_slave).id =
                                SLAVE_AD_INFO(prev_slave).id + 1;
                }
@@ -1626,17 +1616,20 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 
        read_unlock(&bond->lock);
 
-       res = bond_create_slave_symlinks(bond_dev, slave_dev);
-       if (res)
-               goto err_detach;
-
        res = netdev_rx_handler_register(slave_dev, bond_handle_frame,
                                         new_slave);
        if (res) {
                pr_debug("Error %d calling netdev_rx_handler_register\n", res);
-               goto err_dest_symlinks;
+               goto err_detach;
+       }
+
+       res = bond_master_upper_dev_link(bond_dev, slave_dev, new_slave);
+       if (res) {
+               pr_debug("Error %d calling bond_master_upper_dev_link\n", res);
+               goto err_unregister;
        }
 
+
        pr_info("%s: enslaving %s as a%s interface with a%s link.\n",
                bond_dev->name, slave_dev->name,
                bond_is_active_slave(new_slave) ? "n active" : " backup",
@@ -1646,8 +1639,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
        return 0;
 
 /* Undo stages on error */
-err_dest_symlinks:
-       bond_destroy_slave_symlinks(bond_dev, slave_dev);
+err_unregister:
+       netdev_rx_handler_unregister(slave_dev);
 
 err_detach:
        if (!USES_PRIMARY(bond->params.mode))
@@ -1675,9 +1668,6 @@ err_close:
        slave_dev->priv_flags &= ~IFF_BONDING;
        dev_close(slave_dev);
 
-err_unset_master:
-       bond_upper_dev_unlink(bond_dev, slave_dev);
-
 err_restore_mac:
        if (!bond->params.fail_over_mac) {
                /* XXX TODO - fom follow mode needs to change master's
@@ -1698,7 +1688,7 @@ err_free:
 err_undo_flags:
        bond_compute_features(bond);
        /* Enslave of first slave has failed and we need to fix master's mac */
-       if (list_empty(&bond->slave_list) &&
+       if (!bond_has_slaves(bond) &&
            ether_addr_equal(bond_dev->dev_addr, slave_dev->dev_addr))
                eth_hw_addr_random(bond_dev);
 
@@ -1724,6 +1714,7 @@ static int __bond_release_one(struct net_device *bond_dev,
        struct bonding *bond = netdev_priv(bond_dev);
        struct slave *slave, *oldcurrent;
        struct sockaddr addr;
+       int old_flags = bond_dev->flags;
        netdev_features_t old_features = bond_dev->features;
 
        /* slave is not a slave or master is not master of this slave */
@@ -1748,6 +1739,8 @@ static int __bond_release_one(struct net_device *bond_dev,
        }
 
        write_unlock_bh(&bond->lock);
+
+       bond_upper_dev_unlink(bond_dev, slave_dev);
        /* unregister rx_handler early so bond_handle_frame wouldn't be called
         * for this slave anymore.
         */
@@ -1776,7 +1769,7 @@ static int __bond_release_one(struct net_device *bond_dev,
 
        if (!all && !bond->params.fail_over_mac) {
                if (ether_addr_equal(bond_dev->dev_addr, slave->perm_hwaddr) &&
-                   !list_empty(&bond->slave_list))
+                   bond_has_slaves(bond))
                        pr_warn("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s. Set the HWaddr of %s to a different address to avoid conflicts.\n",
                                   bond_dev->name, slave_dev->name,
                                   slave->perm_hwaddr,
@@ -1819,7 +1812,7 @@ static int __bond_release_one(struct net_device *bond_dev,
                write_lock_bh(&bond->lock);
        }
 
-       if (list_empty(&bond->slave_list)) {
+       if (!bond_has_slaves(bond)) {
                bond_set_carrier(bond);
                eth_hw_addr_random(bond_dev);
 
@@ -1835,7 +1828,7 @@ static int __bond_release_one(struct net_device *bond_dev,
        unblock_netpoll_tx();
        synchronize_rcu();
 
-       if (list_empty(&bond->slave_list)) {
+       if (!bond_has_slaves(bond)) {
                call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev);
                call_netdevice_notifiers(NETDEV_RELEASE, bond->dev);
        }
@@ -1847,27 +1840,29 @@ static int __bond_release_one(struct net_device *bond_dev,
                        bond_dev->name, slave_dev->name, bond_dev->name);
 
        /* must do this from outside any spinlocks */
-       bond_destroy_slave_symlinks(bond_dev, slave_dev);
-
        vlan_vids_del_by_dev(slave_dev, bond_dev);
 
        /* If the mode USES_PRIMARY, then this case was handled above by
         * bond_change_active_slave(..., NULL)
         */
        if (!USES_PRIMARY(bond->params.mode)) {
-               /* unset promiscuity level from slave */
-               if (bond_dev->flags & IFF_PROMISC)
+               /* unset promiscuity level from slave
+                * NOTE: The NETDEV_CHANGEADDR call above may change the value
+                * of the IFF_PROMISC flag in the bond_dev, but we need the
+                * value of that flag before that change, as that was the value
+                * when this slave was attached, so we cache at the start of the
+                * when this slave was attached, so we cache it at the start of the
+                */
+               if (old_flags & IFF_PROMISC)
                        dev_set_promiscuity(slave_dev, -1);
 
                /* unset allmulti level from slave */
-               if (bond_dev->flags & IFF_ALLMULTI)
+               if (old_flags & IFF_ALLMULTI)
                        dev_set_allmulti(slave_dev, -1);
 
                bond_hw_addr_flush(bond_dev, slave_dev);
        }
 
-       bond_upper_dev_unlink(bond_dev, slave_dev);
-
        slave_disable_netpoll(slave);
 
        /* close slave before restoring its mac address */
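
A note on the pattern in the hunk above: once NETDEV_CHANGEADDR (and the other notifiers) have run, bond_dev->flags may no longer describe the state the slave was enslaved under, so the release path works from the old_flags snapshot taken on entry. A minimal standalone sketch of that snapshot-before-notifiers idea, with made-up names rather than the kernel API:

#include <stdio.h>

#define IFF_PROMISC  0x100
#define IFF_ALLMULTI 0x200

/* stand-in for a notifier chain that may mutate the flags behind our back */
static void run_notifiers(unsigned int *flags)
{
	*flags &= ~IFF_PROMISC;
}

static void release_slave(unsigned int *master_flags)
{
	/* snapshot the flags before any notifier can change them */
	unsigned int old_flags = *master_flags;

	run_notifiers(master_flags);

	/* undo per-slave state based on the snapshot, not the live flags */
	if (old_flags & IFF_PROMISC)
		printf("dropping promiscuity reference on the slave\n");
	if (old_flags & IFF_ALLMULTI)
		printf("dropping allmulti reference on the slave\n");
}

int main(void)
{
	unsigned int flags = IFF_PROMISC | IFF_ALLMULTI;

	release_slave(&flags);
	return 0;
}
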
@@ -1906,7 +1901,7 @@ static int  bond_release_and_destroy(struct net_device *bond_dev,
        int ret;
 
        ret = bond_release(bond_dev, slave_dev);
-       if (ret == 0 && list_empty(&bond->slave_list)) {
+       if (ret == 0 && !bond_has_slaves(bond)) {
                bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
                pr_info("%s: destroying bond %s.\n",
                        bond_dev->name, bond_dev->name);
@@ -1987,11 +1982,12 @@ static int bond_info_query(struct net_device *bond_dev, struct ifbond *info)
 static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *info)
 {
        struct bonding *bond = netdev_priv(bond_dev);
+       struct list_head *iter;
        int i = 0, res = -ENODEV;
        struct slave *slave;
 
        read_lock(&bond->lock);
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                if (i++ == (int)info->slave_id) {
                        res = 0;
                        strcpy(info->slave_name, slave->dev->name);
@@ -2012,12 +2008,13 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in
 static int bond_miimon_inspect(struct bonding *bond)
 {
        int link_state, commit = 0;
+       struct list_head *iter;
        struct slave *slave;
        bool ignore_updelay;
 
        ignore_updelay = !bond->curr_active_slave ? true : false;
 
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                slave->new_link = BOND_LINK_NOCHANGE;
 
                link_state = bond_check_dev_link(bond, slave->dev, 0);
@@ -2111,9 +2108,10 @@ static int bond_miimon_inspect(struct bonding *bond)
 
 static void bond_miimon_commit(struct bonding *bond)
 {
+       struct list_head *iter;
        struct slave *slave;
 
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                switch (slave->new_link) {
                case BOND_LINK_NOCHANGE:
                        continue;
@@ -2218,7 +2216,7 @@ void bond_mii_monitor(struct work_struct *work)
 
        delay = msecs_to_jiffies(bond->params.miimon);
 
-       if (list_empty(&bond->slave_list))
+       if (!bond_has_slaves(bond))
                goto re_arm;
 
        should_notify_peers = bond_should_notify_peers(bond);
@@ -2267,7 +2265,7 @@ static bool bond_has_this_ip(struct bonding *bond, __be32 ip)
                return true;
 
        rcu_read_lock();
-       netdev_for_each_upper_dev_rcu(bond->dev, upper, iter) {
+       netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
                if (ip == bond_confirm_addr(upper, 0, ip)) {
                        ret = true;
                        break;
@@ -2342,10 +2340,12 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
                 *
                 * TODO: QinQ?
                 */
-               netdev_for_each_upper_dev_rcu(bond->dev, vlan_upper, vlan_iter) {
+               netdev_for_each_all_upper_dev_rcu(bond->dev, vlan_upper,
+                                                 vlan_iter) {
                        if (!is_vlan_dev(vlan_upper))
                                continue;
-                       netdev_for_each_upper_dev_rcu(vlan_upper, upper, iter) {
+                       netdev_for_each_all_upper_dev_rcu(vlan_upper, upper,
+                                                         iter) {
                                if (upper == rt->dst.dev) {
                                        vlan_id = vlan_dev_vlan_id(vlan_upper);
                                        rcu_read_unlock();
@@ -2358,7 +2358,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
                 * our upper vlans, then just search for any dev that
                 * matches, and in case it's a vlan - save the id
                 */
-               netdev_for_each_upper_dev_rcu(bond->dev, upper, iter) {
+               netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
                        if (upper == rt->dst.dev) {
                                /* if it's a vlan - get its VID */
                                if (is_vlan_dev(upper))
@@ -2505,11 +2505,12 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
        struct bonding *bond = container_of(work, struct bonding,
                                            arp_work.work);
        struct slave *slave, *oldcurrent;
+       struct list_head *iter;
        int do_failover = 0;
 
        read_lock(&bond->lock);
 
-       if (list_empty(&bond->slave_list))
+       if (!bond_has_slaves(bond))
                goto re_arm;
 
        oldcurrent = bond->curr_active_slave;
@@ -2521,7 +2522,7 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
         * TODO: what about up/down delay in arp mode? it wasn't here before
         *       so it can wait
         */
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                unsigned long trans_start = dev_trans_start(slave->dev);
 
                if (slave->link != BOND_LINK_UP) {
@@ -2612,10 +2613,11 @@ re_arm:
 static int bond_ab_arp_inspect(struct bonding *bond)
 {
        unsigned long trans_start, last_rx;
+       struct list_head *iter;
        struct slave *slave;
        int commit = 0;
 
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                slave->new_link = BOND_LINK_NOCHANGE;
                last_rx = slave_last_rx(bond, slave);
 
@@ -2682,9 +2684,10 @@ static int bond_ab_arp_inspect(struct bonding *bond)
 static void bond_ab_arp_commit(struct bonding *bond)
 {
        unsigned long trans_start;
+       struct list_head *iter;
        struct slave *slave;
 
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                switch (slave->new_link) {
                case BOND_LINK_NOCHANGE:
                        continue;
@@ -2755,8 +2758,9 @@ do_failover:
  */
 static void bond_ab_arp_probe(struct bonding *bond)
 {
-       struct slave *slave, *next_slave;
-       int i;
+       struct slave *slave, *before = NULL, *new_slave = NULL;
+       struct list_head *iter;
+       bool found = false;
 
        read_lock(&bond->curr_slave_lock);
 
@@ -2786,18 +2790,12 @@ static void bond_ab_arp_probe(struct bonding *bond)
 
        bond_set_slave_inactive_flags(bond->current_arp_slave);
 
-       /* search for next candidate */
-       next_slave = bond_next_slave(bond, bond->current_arp_slave);
-       bond_for_each_slave_from(bond, slave, i, next_slave) {
-               if (IS_UP(slave->dev)) {
-                       slave->link = BOND_LINK_BACK;
-                       bond_set_slave_active_flags(slave);
-                       bond_arp_send_all(bond, slave);
-                       slave->jiffies = jiffies;
-                       bond->current_arp_slave = slave;
-                       break;
-               }
+       bond_for_each_slave(bond, slave, iter) {
+               if (!found && !before && IS_UP(slave->dev))
+                       before = slave;
 
+               if (found && !new_slave && IS_UP(slave->dev))
+                       new_slave = slave;
                /* if the link state is up at this point, we
                 * mark it down - this can happen if we have
                 * simultaneous link failures and
@@ -2805,7 +2803,7 @@ static void bond_ab_arp_probe(struct bonding *bond)
                 * one the current slave so it is still marked
                 * up when it is actually down
                 */
-               if (slave->link == BOND_LINK_UP) {
+               if (!IS_UP(slave->dev) && slave->link == BOND_LINK_UP) {
                        slave->link = BOND_LINK_DOWN;
                        if (slave->link_failure_count < UINT_MAX)
                                slave->link_failure_count++;
@@ -2815,7 +2813,22 @@ static void bond_ab_arp_probe(struct bonding *bond)
                        pr_info("%s: backup interface %s is now down.\n",
                                bond->dev->name, slave->dev->name);
                }
+               if (slave == bond->current_arp_slave)
+                       found = true;
        }
+
+       if (!new_slave && before)
+               new_slave = before;
+
+       if (!new_slave)
+               return;
+
+       new_slave->link = BOND_LINK_BACK;
+       bond_set_slave_active_flags(new_slave);
+       bond_arp_send_all(bond, new_slave);
+       new_slave->jiffies = jiffies;
+       bond->current_arp_slave = new_slave;
+
 }
 
 void bond_activebackup_arp_mon(struct work_struct *work)
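
The reworked probe above replaces the bond_next_slave()/bond_for_each_slave_from walk with a single pass that remembers the first usable slave seen before current_arp_slave ("before") and the first usable one after it ("new_slave"), falling back to "before" for wrap-around. The same selection over a plain array, as a self-contained sketch (illustrative only, not the driver code):

#include <stdbool.h>
#include <stdio.h>

struct slave { const char *name; bool up; };

/* pick the first "up" entry after cur, wrapping to the front if needed */
static int next_up_slave(const struct slave *s, int n, int cur)
{
	int before = -1, new_slave = -1;
	bool found = false;

	for (int i = 0; i < n; i++) {
		if (!found && before < 0 && s[i].up)
			before = i;
		if (found && new_slave < 0 && s[i].up)
			new_slave = i;
		if (i == cur)
			found = true;
	}
	return new_slave >= 0 ? new_slave : before;
}

int main(void)
{
	struct slave slaves[] = {
		{ "eth0", true }, { "eth1", false }, { "eth2", true },
	};

	/* current ARP slave is eth2 (index 2); the search wraps to eth0 */
	printf("next candidate: %d\n", next_up_slave(slaves, 3, 2));
	return 0;
}
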
@@ -2829,7 +2842,7 @@ void bond_activebackup_arp_mon(struct work_struct *work)
 
        delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
 
-       if (list_empty(&bond->slave_list))
+       if (!bond_has_slaves(bond))
                goto re_arm;
 
        should_notify_peers = bond_should_notify_peers(bond);
@@ -3026,99 +3039,85 @@ static struct notifier_block bond_netdev_notifier = {
 
 /*---------------------------- Hashing Policies -----------------------------*/
 
-/*
- * Hash for the output device based upon layer 2 data
- */
-static int bond_xmit_hash_policy_l2(struct sk_buff *skb, int count)
+/* L2 hash helper */
+static inline u32 bond_eth_hash(struct sk_buff *skb)
 {
        struct ethhdr *data = (struct ethhdr *)skb->data;
 
        if (skb_headlen(skb) >= offsetof(struct ethhdr, h_proto))
-               return (data->h_dest[5] ^ data->h_source[5]) % count;
+               return data->h_dest[5] ^ data->h_source[5];
 
        return 0;
 }
 
-/*
- * Hash for the output device based upon layer 2 and layer 3 data. If
- * the packet is not IP, fall back on bond_xmit_hash_policy_l2()
- */
-static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count)
+/* Extract the appropriate headers based on bond's xmit policy */
+static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
+                             struct flow_keys *fk)
 {
-       const struct ethhdr *data;
+       const struct ipv6hdr *iph6;
        const struct iphdr *iph;
-       const struct ipv6hdr *ipv6h;
-       u32 v6hash;
-       const __be32 *s, *d;
+       int noff, proto = -1;
 
-       if (skb->protocol == htons(ETH_P_IP) &&
-           pskb_network_may_pull(skb, sizeof(*iph))) {
+       if (bond->params.xmit_policy > BOND_XMIT_POLICY_LAYER23)
+               return skb_flow_dissect(skb, fk);
+
+       fk->ports = 0;
+       noff = skb_network_offset(skb);
+       if (skb->protocol == htons(ETH_P_IP)) {
+               if (!pskb_may_pull(skb, noff + sizeof(*iph)))
+                       return false;
                iph = ip_hdr(skb);
-               data = (struct ethhdr *)skb->data;
-               return ((ntohl(iph->saddr ^ iph->daddr) & 0xffff) ^
-                       (data->h_dest[5] ^ data->h_source[5])) % count;
-       } else if (skb->protocol == htons(ETH_P_IPV6) &&
-                  pskb_network_may_pull(skb, sizeof(*ipv6h))) {
-               ipv6h = ipv6_hdr(skb);
-               data = (struct ethhdr *)skb->data;
-               s = &ipv6h->saddr.s6_addr32[0];
-               d = &ipv6h->daddr.s6_addr32[0];
-               v6hash = (s[1] ^ d[1]) ^ (s[2] ^ d[2]) ^ (s[3] ^ d[3]);
-               v6hash ^= (v6hash >> 24) ^ (v6hash >> 16) ^ (v6hash >> 8);
-               return (v6hash ^ data->h_dest[5] ^ data->h_source[5]) % count;
-       }
-
-       return bond_xmit_hash_policy_l2(skb, count);
+               fk->src = iph->saddr;
+               fk->dst = iph->daddr;
+               noff += iph->ihl << 2;
+               if (!ip_is_fragment(iph))
+                       proto = iph->protocol;
+       } else if (skb->protocol == htons(ETH_P_IPV6)) {
+               if (!pskb_may_pull(skb, noff + sizeof(*iph6)))
+                       return false;
+               iph6 = ipv6_hdr(skb);
+               fk->src = (__force __be32)ipv6_addr_hash(&iph6->saddr);
+               fk->dst = (__force __be32)ipv6_addr_hash(&iph6->daddr);
+               noff += sizeof(*iph6);
+               proto = iph6->nexthdr;
+       } else {
+               return false;
+       }
+       if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34 && proto >= 0)
+               fk->ports = skb_flow_get_ports(skb, noff, proto);
+
+       return true;
 }
 
-/*
- * Hash for the output device based upon layer 3 and layer 4 data. If
- * the packet is a frag or not TCP or UDP, just use layer 3 data.  If it is
- * altogether not IP, fall back on bond_xmit_hash_policy_l2()
+/**
+ * bond_xmit_hash - generate a hash value based on the xmit policy
+ * @bond: bonding device
+ * @skb: buffer to use for headers
+ * @count: modulo value
+ *
+ * This function will extract the necessary headers from the skb buffer and use
+ * them to generate a hash based on the xmit_policy set in the bonding device;
+ * the result is reduced modulo count before returning.
  */
-static int bond_xmit_hash_policy_l34(struct sk_buff *skb, int count)
+int bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, int count)
 {
-       u32 layer4_xor = 0;
-       const struct iphdr *iph;
-       const struct ipv6hdr *ipv6h;
-       const __be32 *s, *d;
-       const __be16 *l4 = NULL;
-       __be16 _l4[2];
-       int noff = skb_network_offset(skb);
-       int poff;
-
-       if (skb->protocol == htons(ETH_P_IP) &&
-           pskb_may_pull(skb, noff + sizeof(*iph))) {
-               iph = ip_hdr(skb);
-               poff = proto_ports_offset(iph->protocol);
+       struct flow_keys flow;
+       u32 hash;
 
-               if (!ip_is_fragment(iph) && poff >= 0) {
-                       l4 = skb_header_pointer(skb, noff + (iph->ihl << 2) + poff,
-                                               sizeof(_l4), &_l4);
-                       if (l4)
-                               layer4_xor = ntohs(l4[0] ^ l4[1]);
-               }
-               return (layer4_xor ^
-                       ((ntohl(iph->saddr ^ iph->daddr)) & 0xffff)) % count;
-       } else if (skb->protocol == htons(ETH_P_IPV6) &&
-                  pskb_may_pull(skb, noff + sizeof(*ipv6h))) {
-               ipv6h = ipv6_hdr(skb);
-               poff = proto_ports_offset(ipv6h->nexthdr);
-               if (poff >= 0) {
-                       l4 = skb_header_pointer(skb, noff + sizeof(*ipv6h) + poff,
-                                               sizeof(_l4), &_l4);
-                       if (l4)
-                               layer4_xor = ntohs(l4[0] ^ l4[1]);
-               }
-               s = &ipv6h->saddr.s6_addr32[0];
-               d = &ipv6h->daddr.s6_addr32[0];
-               layer4_xor ^= (s[1] ^ d[1]) ^ (s[2] ^ d[2]) ^ (s[3] ^ d[3]);
-               layer4_xor ^= (layer4_xor >> 24) ^ (layer4_xor >> 16) ^
-                              (layer4_xor >> 8);
-               return layer4_xor % count;
-       }
+       if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER2 ||
+           !bond_flow_dissect(bond, skb, &flow))
+               return bond_eth_hash(skb) % count;
 
-       return bond_xmit_hash_policy_l2(skb, count);
+       if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER23 ||
+           bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP23)
+               hash = bond_eth_hash(skb);
+       else
+               hash = (__force u32)flow.ports;
+       hash ^= (__force u32)flow.dst ^ (__force u32)flow.src;
+       hash ^= (hash >> 16);
+       hash ^= (hash >> 8);
+
+       return hash % count;
 }
 
 /*-------------------------- Device entry points ----------------------------*/
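
The three per-policy hash functions collapse into bond_xmit_hash(): layer2 keeps the plain Ethernet hash, layer2+3 (and encap2+3) seed the hash with the Ethernet bytes, layer3+4 seeds it with the ports, and all of them then XOR in the network-layer addresses before folding and reducing modulo the slave count. A rough userspace model of that final mixing step, assuming the src/dst/ports values have already been extracted (the real code gets them from bond_flow_dissect/skb_flow_dissect):

#include <stdint.h>
#include <stdio.h>

/* fold addresses (plus ports or an L2 seed) down to a slave index */
static uint32_t xmit_hash(uint32_t src, uint32_t dst, uint32_t ports,
			  uint32_t l2seed, int use_l2, int count)
{
	/* l2seed stands in for bond_eth_hash() under the l2+3 policies */
	uint32_t hash = use_l2 ? l2seed : ports;

	hash ^= dst ^ src;
	hash ^= hash >> 16;
	hash ^= hash >> 8;

	return hash % count;
}

int main(void)
{
	/* two flows between the same pair of hosts can land on different
	 * slaves once the TCP/UDP ports are mixed in (layer3+4 policy) */
	printf("%u\n", xmit_hash(0x0a000001, 0x0a000002, 0x1f90c350, 0, 0, 4));
	printf("%u\n", xmit_hash(0x0a000001, 0x0a000002, 0x1f90c351, 0, 0, 4));
	return 0;
}
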
@@ -3148,13 +3147,14 @@ static void bond_work_cancel_all(struct bonding *bond)
 static int bond_open(struct net_device *bond_dev)
 {
        struct bonding *bond = netdev_priv(bond_dev);
+       struct list_head *iter;
        struct slave *slave;
 
        /* reset slave->backup and slave->inactive */
        read_lock(&bond->lock);
-       if (!list_empty(&bond->slave_list)) {
+       if (bond_has_slaves(bond)) {
                read_lock(&bond->curr_slave_lock);
-               bond_for_each_slave(bond, slave) {
+               bond_for_each_slave(bond, slave, iter) {
                        if ((bond->params.mode == BOND_MODE_ACTIVEBACKUP)
                                && (slave != bond->curr_active_slave)) {
                                bond_set_slave_inactive_flags(slave);
@@ -3214,12 +3214,13 @@ static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
 {
        struct bonding *bond = netdev_priv(bond_dev);
        struct rtnl_link_stats64 temp;
+       struct list_head *iter;
        struct slave *slave;
 
        memset(stats, 0, sizeof(*stats));
 
        read_lock_bh(&bond->lock);
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                const struct rtnl_link_stats64 *sstats =
                        dev_get_stats(slave->dev, &temp);
 
@@ -3386,22 +3387,24 @@ static void bond_change_rx_flags(struct net_device *bond_dev, int change)
 static void bond_set_rx_mode(struct net_device *bond_dev)
 {
        struct bonding *bond = netdev_priv(bond_dev);
+       struct list_head *iter;
        struct slave *slave;
 
-       ASSERT_RTNL();
 
+       rcu_read_lock();
        if (USES_PRIMARY(bond->params.mode)) {
-               slave = rtnl_dereference(bond->curr_active_slave);
+               slave = rcu_dereference(bond->curr_active_slave);
                if (slave) {
                        dev_uc_sync(slave->dev, bond_dev);
                        dev_mc_sync(slave->dev, bond_dev);
                }
        } else {
-               bond_for_each_slave(bond, slave) {
+               bond_for_each_slave_rcu(bond, slave, iter) {
                        dev_uc_sync_multiple(slave->dev, bond_dev);
                        dev_mc_sync_multiple(slave->dev, bond_dev);
                }
        }
+       rcu_read_unlock();
 }
 
 static int bond_neigh_init(struct neighbour *n)
@@ -3464,7 +3467,8 @@ static int bond_neigh_setup(struct net_device *dev,
 static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
 {
        struct bonding *bond = netdev_priv(bond_dev);
-       struct slave *slave;
+       struct slave *slave, *rollback_slave;
+       struct list_head *iter;
        int res = 0;
 
        pr_debug("bond=%p, name=%s, new_mtu=%d\n", bond,
@@ -3485,10 +3489,9 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
         * call to the base driver.
         */
 
-       bond_for_each_slave(bond, slave) {
-               pr_debug("s %p s->p %p c_m %p\n",
+       bond_for_each_slave(bond, slave, iter) {
+               pr_debug("s %p c_m %p\n",
                         slave,
-                        bond_prev_slave(bond, slave),
                         slave->dev->netdev_ops->ndo_change_mtu);
 
                res = dev_set_mtu(slave->dev, new_mtu);
@@ -3513,13 +3516,16 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
 
 unwind:
        /* unwind from head to the slave that failed */
-       bond_for_each_slave_continue_reverse(bond, slave) {
+       bond_for_each_slave(bond, rollback_slave, iter) {
                int tmp_res;
 
-               tmp_res = dev_set_mtu(slave->dev, bond_dev->mtu);
+               if (rollback_slave == slave)
+                       break;
+
+               tmp_res = dev_set_mtu(rollback_slave->dev, bond_dev->mtu);
                if (tmp_res) {
                        pr_debug("unwind err %d dev %s\n",
-                                tmp_res, slave->dev->name);
+                                tmp_res, rollback_slave->dev->name);
                }
        }
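
With bond_for_each_slave_continue_reverse() gone, the unwind path simply re-walks the list from the head and stops when it reaches the slave whose dev_set_mtu() call failed (the MAC address setter below uses the same pattern). The idea in a generic, self-contained form, not tied to the driver:

#include <stdio.h>

static int apply(int i)     { return i == 3 ? -1 : 0; } /* element 3 fails */
static void rollback(int i) { printf("rolling back element %d\n", i); }

int main(void)
{
	int failed = -1;

	for (int i = 0; i < 5; i++) {
		if (apply(i)) {
			failed = i;
			break;
		}
	}

	if (failed >= 0) {
		/* walk from the head again and stop at the failed element */
		for (int i = 0; i < 5; i++) {
			if (i == failed)
				break;
			rollback(i);
		}
	}
	return 0;
}
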
 
@@ -3536,8 +3542,9 @@ unwind:
 static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
 {
        struct bonding *bond = netdev_priv(bond_dev);
+       struct slave *slave, *rollback_slave;
        struct sockaddr *sa = addr, tmp_sa;
-       struct slave *slave;
+       struct list_head *iter;
        int res = 0;
 
        if (bond->params.mode == BOND_MODE_ALB)
@@ -3571,7 +3578,7 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
         * call to the base driver.
         */
 
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                const struct net_device_ops *slave_ops = slave->dev->netdev_ops;
                pr_debug("slave %p %s\n", slave, slave->dev->name);
 
@@ -3603,13 +3610,16 @@ unwind:
        tmp_sa.sa_family = bond_dev->type;
 
        /* unwind from head to the slave that failed */
-       bond_for_each_slave_continue_reverse(bond, slave) {
+       bond_for_each_slave(bond, rollback_slave, iter) {
                int tmp_res;
 
-               tmp_res = dev_set_mac_address(slave->dev, &tmp_sa);
+               if (rollback_slave == slave)
+                       break;
+
+               tmp_res = dev_set_mac_address(rollback_slave->dev, &tmp_sa);
                if (tmp_res) {
                        pr_debug("unwind err %d dev %s\n",
-                                tmp_res, slave->dev->name);
+                                tmp_res, rollback_slave->dev->name);
                }
        }
 
@@ -3628,11 +3638,12 @@ unwind:
  */
 void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id)
 {
+       struct list_head *iter;
        struct slave *slave;
        int i = slave_id;
 
        /* Here we start from the slave with slave_id */
-       bond_for_each_slave_rcu(bond, slave) {
+       bond_for_each_slave_rcu(bond, slave, iter) {
                if (--i < 0) {
                        if (slave_can_tx(slave)) {
                                bond_dev_queue_xmit(bond, skb, slave->dev);
@@ -3643,7 +3654,7 @@ void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id)
 
        /* Here we start from the first slave up to slave_id */
        i = slave_id;
-       bond_for_each_slave_rcu(bond, slave) {
+       bond_for_each_slave_rcu(bond, slave, iter) {
                if (--i < 0)
                        break;
                if (slave_can_tx(slave)) {
@@ -3700,8 +3711,7 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d
        return NETDEV_TX_OK;
 }
 
-/*
- * In bond_xmit_xor() , we determine the output device by using a pre-
+/* In bond_xmit_xor(), we determine the output device by using a pre-
  * determined xmit_hash_policy(). If the selected device is not enabled,
  * find the next active slave.
  */
@@ -3709,8 +3719,7 @@ static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev)
 {
        struct bonding *bond = netdev_priv(bond_dev);
 
-       bond_xmit_slave_id(bond, skb,
-                          bond->xmit_hash_policy(skb, bond->slave_cnt));
+       bond_xmit_slave_id(bond, skb, bond_xmit_hash(bond, skb, bond->slave_cnt));
 
        return NETDEV_TX_OK;
 }
@@ -3720,8 +3729,9 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
 {
        struct bonding *bond = netdev_priv(bond_dev);
        struct slave *slave = NULL;
+       struct list_head *iter;
 
-       bond_for_each_slave_rcu(bond, slave) {
+       bond_for_each_slave_rcu(bond, slave, iter) {
                if (bond_is_last_slave(bond, slave))
                        break;
                if (IS_UP(slave->dev) && slave->link == BOND_LINK_UP) {
@@ -3746,22 +3756,6 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
 
 /*------------------------- Device initialization ---------------------------*/
 
-static void bond_set_xmit_hash_policy(struct bonding *bond)
-{
-       switch (bond->params.xmit_policy) {
-       case BOND_XMIT_POLICY_LAYER23:
-               bond->xmit_hash_policy = bond_xmit_hash_policy_l23;
-               break;
-       case BOND_XMIT_POLICY_LAYER34:
-               bond->xmit_hash_policy = bond_xmit_hash_policy_l34;
-               break;
-       case BOND_XMIT_POLICY_LAYER2:
-       default:
-               bond->xmit_hash_policy = bond_xmit_hash_policy_l2;
-               break;
-       }
-}
-
 /*
  * Lookup the slave that corresponds to a qid
  */
@@ -3770,13 +3764,14 @@ static inline int bond_slave_override(struct bonding *bond,
 {
        struct slave *slave = NULL;
        struct slave *check_slave;
+       struct list_head *iter;
        int res = 1;
 
        if (!skb->queue_mapping)
                return 1;
 
        /* Find out if any slaves have the same mapping as this skb. */
-       bond_for_each_slave_rcu(bond, check_slave) {
+       bond_for_each_slave_rcu(bond, check_slave, iter) {
                if (check_slave->queue_id == skb->queue_mapping) {
                        slave = check_slave;
                        break;
@@ -3862,7 +3857,7 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
                return NETDEV_TX_BUSY;
 
        rcu_read_lock();
-       if (!list_empty(&bond->slave_list))
+       if (bond_has_slaves(bond))
                ret = __bond_start_xmit(skb, dev);
        else
                kfree_skb(skb);
@@ -3871,43 +3866,12 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
        return ret;
 }
 
-/*
- * set bond mode specific net device operations
- */
-void bond_set_mode_ops(struct bonding *bond, int mode)
-{
-       struct net_device *bond_dev = bond->dev;
-
-       switch (mode) {
-       case BOND_MODE_ROUNDROBIN:
-               break;
-       case BOND_MODE_ACTIVEBACKUP:
-               break;
-       case BOND_MODE_XOR:
-               bond_set_xmit_hash_policy(bond);
-               break;
-       case BOND_MODE_BROADCAST:
-               break;
-       case BOND_MODE_8023AD:
-               bond_set_xmit_hash_policy(bond);
-               break;
-       case BOND_MODE_ALB:
-               /* FALLTHRU */
-       case BOND_MODE_TLB:
-               break;
-       default:
-               /* Should never happen, mode already checked */
-               pr_err("%s: Error: Unknown bonding mode %d\n",
-                      bond_dev->name, mode);
-               break;
-       }
-}
-
 static int bond_ethtool_get_settings(struct net_device *bond_dev,
                                     struct ethtool_cmd *ecmd)
 {
        struct bonding *bond = netdev_priv(bond_dev);
        unsigned long speed = 0;
+       struct list_head *iter;
        struct slave *slave;
 
        ecmd->duplex = DUPLEX_UNKNOWN;
@@ -3919,7 +3883,7 @@ static int bond_ethtool_get_settings(struct net_device *bond_dev,
         * this is an accurate maximum.
         */
        read_lock(&bond->lock);
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                if (SLAVE_IS_OK(slave)) {
                        if (slave->speed != SPEED_UNKNOWN)
                                speed += slave->speed;
@@ -3994,7 +3958,6 @@ static void bond_setup(struct net_device *bond_dev)
        /* initialize rwlocks */
        rwlock_init(&bond->lock);
        rwlock_init(&bond->curr_slave_lock);
-       INIT_LIST_HEAD(&bond->slave_list);
        bond->params = bonding_defaults;
 
        /* Initialize pointers */
@@ -4004,7 +3967,6 @@ static void bond_setup(struct net_device *bond_dev)
        ether_setup(bond_dev);
        bond_dev->netdev_ops = &bond_netdev_ops;
        bond_dev->ethtool_ops = &bond_ethtool_ops;
-       bond_set_mode_ops(bond, bond->params.mode);
 
        bond_dev->destructor = bond_destructor;
 
@@ -4050,12 +4012,13 @@ static void bond_setup(struct net_device *bond_dev)
 static void bond_uninit(struct net_device *bond_dev)
 {
        struct bonding *bond = netdev_priv(bond_dev);
-       struct slave *slave, *tmp_slave;
+       struct list_head *iter;
+       struct slave *slave;
 
        bond_netpoll_cleanup(bond_dev);
 
        /* Release the bonded slaves */
-       list_for_each_entry_safe(slave, tmp_slave, &bond->slave_list, list)
+       bond_for_each_slave(bond, slave, iter)
                __bond_release_one(bond_dev, slave->dev, true);
        pr_info("%s: released all slaves\n", bond_dev->name);
 
index 20a6ee25bb63e42cdf89c0273d8e1afa30234f26..fb868d6c22dac5c75ecd156e61089e7f6806b6c9 100644 (file)
@@ -10,8 +10,9 @@ static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(&bond->lock)
 {
        struct bonding *bond = seq->private;
-       loff_t off = 0;
+       struct list_head *iter;
        struct slave *slave;
+       loff_t off = 0;
 
        /* make sure the bond won't be taken away */
        rcu_read_lock();
@@ -20,7 +21,7 @@ static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
        if (*pos == 0)
                return SEQ_START_TOKEN;
 
-       bond_for_each_slave(bond, slave)
+       bond_for_each_slave(bond, slave, iter)
                if (++off == *pos)
                        return slave;
 
@@ -30,17 +31,25 @@ static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
 static void *bond_info_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
        struct bonding *bond = seq->private;
-       struct slave *slave = v;
+       struct list_head *iter;
+       struct slave *slave;
+       bool found = false;
 
        ++*pos;
        if (v == SEQ_START_TOKEN)
                return bond_first_slave(bond);
 
-       if (bond_is_last_slave(bond, slave))
+       if (bond_is_last_slave(bond, v))
                return NULL;
-       slave = bond_next_slave(bond, slave);
 
-       return slave;
+       bond_for_each_slave(bond, slave, iter) {
+               if (found)
+                       return slave;
+               if (slave == v)
+                       found = true;
+       }
+
+       return NULL;
 }
 
 static void bond_info_seq_stop(struct seq_file *seq, void *v)
index c29b836749b6323fe86c35e7762b25a3c8596978..e9249527e7e70970798f7b4ee43fda075a4a4421 100644 (file)
@@ -168,41 +168,6 @@ static const struct class_attribute class_attr_bonding_masters = {
        .namespace = bonding_namespace,
 };
 
-int bond_create_slave_symlinks(struct net_device *master,
-                              struct net_device *slave)
-{
-       char linkname[IFNAMSIZ+7];
-       int ret = 0;
-
-       /* first, create a link from the slave back to the master */
-       ret = sysfs_create_link(&(slave->dev.kobj), &(master->dev.kobj),
-                               "master");
-       if (ret)
-               return ret;
-       /* next, create a link from the master to the slave */
-       sprintf(linkname, "slave_%s", slave->name);
-       ret = sysfs_create_link(&(master->dev.kobj), &(slave->dev.kobj),
-                               linkname);
-
-       /* free the master link created earlier in case of error */
-       if (ret)
-               sysfs_remove_link(&(slave->dev.kobj), "master");
-
-       return ret;
-
-}
-
-void bond_destroy_slave_symlinks(struct net_device *master,
-                                struct net_device *slave)
-{
-       char linkname[IFNAMSIZ+7];
-
-       sysfs_remove_link(&(slave->dev.kobj), "master");
-       sprintf(linkname, "slave_%s", slave->name);
-       sysfs_remove_link(&(master->dev.kobj), linkname);
-}
-
-
 /*
  * Show the slaves in the current bond.
  */
@@ -210,11 +175,12 @@ static ssize_t bonding_show_slaves(struct device *d,
                                   struct device_attribute *attr, char *buf)
 {
        struct bonding *bond = to_bond(d);
+       struct list_head *iter;
        struct slave *slave;
        int res = 0;
 
        read_lock(&bond->lock);
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                if (res > (PAGE_SIZE - IFNAMSIZ)) {
                        /* not enough space for another interface name */
                        if ((PAGE_SIZE - res) > 10)
@@ -326,7 +292,7 @@ static ssize_t bonding_store_mode(struct device *d,
                goto out;
        }
 
-       if (!list_empty(&bond->slave_list)) {
+       if (bond_has_slaves(bond)) {
                pr_err("unable to update mode of %s because it has slaves.\n",
                        bond->dev->name);
                ret = -EPERM;
@@ -352,7 +318,6 @@ static ssize_t bonding_store_mode(struct device *d,
        /* don't cache arp_validate between modes */
        bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
        bond->params.mode = new_value;
-       bond_set_mode_ops(bond, bond->params.mode);
        pr_info("%s: setting mode to %s (%d).\n",
                bond->dev->name, bond_mode_tbl[new_value].modename,
                new_value);
@@ -392,7 +357,6 @@ static ssize_t bonding_store_xmit_hash(struct device *d,
                ret = -EINVAL;
        } else {
                bond->params.xmit_policy = new_value;
-               bond_set_mode_ops(bond, bond->params.mode);
                pr_info("%s: setting xmit hash policy to %s (%d).\n",
                        bond->dev->name,
                        xmit_hashtype_tbl[new_value].modename, new_value);
@@ -522,7 +486,7 @@ static ssize_t bonding_store_fail_over_mac(struct device *d,
        if (!rtnl_trylock())
                return restart_syscall();
 
-       if (!list_empty(&bond->slave_list)) {
+       if (bond_has_slaves(bond)) {
                pr_err("%s: Can't alter fail_over_mac with slaves in bond.\n",
                       bond->dev->name);
                ret = -EPERM;
@@ -656,6 +620,7 @@ static ssize_t bonding_store_arp_targets(struct device *d,
                                         const char *buf, size_t count)
 {
        struct bonding *bond = to_bond(d);
+       struct list_head *iter;
        struct slave *slave;
        __be32 newtarget, *targets;
        unsigned long *targets_rx;
@@ -688,7 +653,7 @@ static ssize_t bonding_store_arp_targets(struct device *d,
                         &newtarget);
                /* not to race with bond_arp_rcv */
                write_lock_bh(&bond->lock);
-               bond_for_each_slave(bond, slave)
+               bond_for_each_slave(bond, slave, iter)
                        slave->target_last_arp_rx[ind] = jiffies;
                targets[ind] = newtarget;
                write_unlock_bh(&bond->lock);
@@ -714,7 +679,7 @@ static ssize_t bonding_store_arp_targets(struct device *d,
                        &newtarget);
 
                write_lock_bh(&bond->lock);
-               bond_for_each_slave(bond, slave) {
+               bond_for_each_slave(bond, slave, iter) {
                        targets_rx = slave->target_last_arp_rx;
                        j = ind;
                        for (; (j < BOND_MAX_ARP_TARGETS-1) && targets[j+1]; j++)
@@ -1111,6 +1076,7 @@ static ssize_t bonding_store_primary(struct device *d,
                                     const char *buf, size_t count)
 {
        struct bonding *bond = to_bond(d);
+       struct list_head *iter;
        char ifname[IFNAMSIZ];
        struct slave *slave;
 
@@ -1138,7 +1104,7 @@ static ssize_t bonding_store_primary(struct device *d,
                goto out;
        }
 
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                if (strncmp(slave->dev->name, ifname, IFNAMSIZ) == 0) {
                        pr_info("%s: Setting %s as primary slave.\n",
                                bond->dev->name, slave->dev->name);
@@ -1286,6 +1252,7 @@ static ssize_t bonding_store_active_slave(struct device *d,
 {
        struct slave *slave, *old_active, *new_active;
        struct bonding *bond = to_bond(d);
+       struct list_head *iter;
        char ifname[IFNAMSIZ];
 
        if (!rtnl_trylock())
@@ -1313,7 +1280,7 @@ static ssize_t bonding_store_active_slave(struct device *d,
                goto out;
        }
 
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                if (strncmp(slave->dev->name, ifname, IFNAMSIZ) == 0) {
                        old_active = bond->curr_active_slave;
                        new_active = slave;
@@ -1493,6 +1460,7 @@ static ssize_t bonding_show_queue_id(struct device *d,
                                     char *buf)
 {
        struct bonding *bond = to_bond(d);
+       struct list_head *iter;
        struct slave *slave;
        int res = 0;
 
@@ -1500,7 +1468,7 @@ static ssize_t bonding_show_queue_id(struct device *d,
                return restart_syscall();
 
        read_lock(&bond->lock);
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                if (res > (PAGE_SIZE - IFNAMSIZ - 6)) {
                        /* not enough space for another interface_name:queue_id pair */
                        if ((PAGE_SIZE - res) > 10)
@@ -1529,6 +1497,7 @@ static ssize_t bonding_store_queue_id(struct device *d,
 {
        struct slave *slave, *update_slave;
        struct bonding *bond = to_bond(d);
+       struct list_head *iter;
        u16 qid;
        int ret = count;
        char *delim;
@@ -1565,7 +1534,7 @@ static ssize_t bonding_store_queue_id(struct device *d,
 
        /* Search for the slave and check for duplicate qids */
        update_slave = NULL;
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                if (sdev == slave->dev)
                        /*
                         * We don't need to check the matching
@@ -1619,6 +1588,7 @@ static ssize_t bonding_store_slaves_active(struct device *d,
 {
        struct bonding *bond = to_bond(d);
        int new_value, ret = count;
+       struct list_head *iter;
        struct slave *slave;
 
        if (sscanf(buf, "%d", &new_value) != 1) {
@@ -1641,7 +1611,7 @@ static ssize_t bonding_store_slaves_active(struct device *d,
        }
 
        read_lock(&bond->lock);
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                if (!bond_is_active_slave(slave)) {
                        if (new_value)
                                slave->inactive = 0;
index 03cf3fd14490c4e4dcf8cd2d61f8bca99c9d55db..0bd04fbda8e91fccb88f9192fcea70e7c69e1996 100644 (file)
        res; })
 
 /* slave list primitives */
-#define bond_to_slave(ptr) list_entry(ptr, struct slave, list)
+#define bond_slave_list(bond) (&(bond)->dev->adj_list.lower)
+
+#define bond_has_slaves(bond) !list_empty(bond_slave_list(bond))
 
 /* IMPORTANT: bond_first/last_slave can return NULL in case of an empty list */
 #define bond_first_slave(bond) \
-       list_first_entry_or_null(&(bond)->slave_list, struct slave, list)
+       (bond_has_slaves(bond) ? \
+               netdev_adjacent_get_private(bond_slave_list(bond)->next) : \
+               NULL)
 #define bond_last_slave(bond) \
-       (list_empty(&(bond)->slave_list) ? NULL : \
-                                          bond_to_slave((bond)->slave_list.prev))
-
-#define bond_is_first_slave(bond, pos) ((pos)->list.prev == &(bond)->slave_list)
-#define bond_is_last_slave(bond, pos) ((pos)->list.next == &(bond)->slave_list)
-
-/* Since bond_first/last_slave can return NULL, these can return NULL too */
-#define bond_next_slave(bond, pos) \
-       (bond_is_last_slave(bond, pos) ? bond_first_slave(bond) : \
-                                        bond_to_slave((pos)->list.next))
+       (bond_has_slaves(bond) ? \
+               netdev_adjacent_get_private(bond_slave_list(bond)->prev) : \
+               NULL)
 
-#define bond_prev_slave(bond, pos) \
-       (bond_is_first_slave(bond, pos) ? bond_last_slave(bond) : \
-                                         bond_to_slave((pos)->list.prev))
-
-/**
- * bond_for_each_slave_from - iterate the slaves list from a starting point
- * @bond:      the bond holding this list.
- * @pos:       current slave.
- * @cnt:       counter for max number of moves
- * @start:     starting point.
- *
- * Caller must hold bond->lock
- */
-#define bond_for_each_slave_from(bond, pos, cnt, start) \
-       for (cnt = 0, pos = start; pos && cnt < (bond)->slave_cnt; \
-            cnt++, pos = bond_next_slave(bond, pos))
+#define bond_is_first_slave(bond, pos) (pos == bond_first_slave(bond))
+#define bond_is_last_slave(bond, pos) (pos == bond_last_slave(bond))
 
 /**
  * bond_for_each_slave - iterate over all slaves
  * @bond:      the bond holding this list
  * @pos:       current slave
+ * @iter:      list_head * iterator
  *
  * Caller must hold bond->lock
  */
-#define bond_for_each_slave(bond, pos) \
-       list_for_each_entry(pos, &(bond)->slave_list, list)
+#define bond_for_each_slave(bond, pos, iter) \
+       netdev_for_each_lower_private((bond)->dev, pos, iter)
 
 /* Caller must have rcu_read_lock */
-#define bond_for_each_slave_rcu(bond, pos) \
-       list_for_each_entry_rcu(pos, &(bond)->slave_list, list)
-
-/**
- * bond_for_each_slave_reverse - iterate in reverse from a given position
- * @bond:      the bond holding this list
- * @pos:       slave to continue from
- *
- * Caller must hold bond->lock
- */
-#define bond_for_each_slave_continue_reverse(bond, pos) \
-       list_for_each_entry_continue_reverse(pos, &(bond)->slave_list, list)
+#define bond_for_each_slave_rcu(bond, pos, iter) \
+       netdev_for_each_lower_private_rcu((bond)->dev, pos, iter)
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
 extern atomic_t netpoll_block_tx;
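
The iterator macros now take an explicit list_head cursor because struct slave no longer embeds its own list node; iteration walks the netdev lower-device adjacency list and pulls the slave out of each node's private pointer. A toy userspace model of why the extra "iter" argument is needed (hypothetical simplified types, not the netdev API):

#include <stdio.h>

struct adj { void *private; struct adj *next; };
struct slave { const char *name; };

/* walk adjacency nodes, exposing each node's private pointer as "pos" */
#define for_each_lower_private(head, pos, iter)				\
	for (iter = (head); iter && (pos = (iter)->private, 1);		\
	     iter = (iter)->next)

int main(void)
{
	struct slave s1 = { "eth0" }, s2 = { "eth1" };
	struct adj a2 = { &s2, NULL }, a1 = { &s1, &a2 };
	struct adj *iter;
	struct slave *slave;

	for_each_lower_private(&a1, slave, iter)
		printf("%s\n", slave->name);
	return 0;
}
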
@@ -188,7 +162,6 @@ struct bond_parm_tbl {
 
 struct slave {
        struct net_device *dev; /* first - useful for panic debug */
-       struct list_head list;
        struct bonding *bond; /* our master */
        int    delay;
        unsigned long jiffies;
@@ -228,7 +201,6 @@ struct slave {
  */
 struct bonding {
        struct   net_device *dev; /* first - useful for panic debug */
-       struct   list_head slave_list;
        struct   slave *curr_active_slave;
        struct   slave *current_arp_slave;
        struct   slave *primary_slave;
@@ -245,7 +217,6 @@ struct bonding {
        char     proc_file_name[IFNAMSIZ];
 #endif /* CONFIG_PROC_FS */
        struct   list_head bond_list;
-       int      (*xmit_hash_policy)(struct sk_buff *, int);
        u16      rr_tx_counter;
        struct   ad_bond_info ad_info;
        struct   alb_bond_info alb_info;
@@ -276,13 +247,7 @@ struct bonding {
 static inline struct slave *bond_get_slave_by_dev(struct bonding *bond,
                                                  struct net_device *slave_dev)
 {
-       struct slave *slave = NULL;
-
-       bond_for_each_slave(bond, slave)
-               if (slave->dev == slave_dev)
-                       return slave;
-
-       return NULL;
+       return netdev_lower_dev_get_private(bond->dev, slave_dev);
 }
 
 static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
@@ -432,21 +397,18 @@ static inline bool slave_can_tx(struct slave *slave)
 struct bond_net;
 
 int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond, struct slave *slave);
-struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr);
 int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
 void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id);
 int bond_create(struct net *net, const char *name);
 int bond_create_sysfs(struct bond_net *net);
 void bond_destroy_sysfs(struct bond_net *net);
 void bond_prepare_sysfs_group(struct bonding *bond);
-int bond_create_slave_symlinks(struct net_device *master, struct net_device *slave);
-void bond_destroy_slave_symlinks(struct net_device *master, struct net_device *slave);
 int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev);
 int bond_release(struct net_device *bond_dev, struct net_device *slave_dev);
 void bond_mii_monitor(struct work_struct *);
 void bond_loadbalance_arp_mon(struct work_struct *);
 void bond_activebackup_arp_mon(struct work_struct *);
-void bond_set_mode_ops(struct bonding *bond, int mode);
+int bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, int count);
 int bond_parse_parm(const char *mode_arg, const struct bond_parm_tbl *tbl);
 void bond_select_active_slave(struct bonding *bond);
 void bond_change_active_slave(struct bonding *bond, struct slave *new_active);
@@ -492,9 +454,10 @@ static inline void bond_destroy_proc_dir(struct bond_net *bn)
 static inline struct slave *bond_slave_has_mac(struct bonding *bond,
                                               const u8 *mac)
 {
+       struct list_head *iter;
        struct slave *tmp;
 
-       bond_for_each_slave(bond, tmp)
+       bond_for_each_slave(bond, tmp, iter)
                if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
                        return tmp;
 
index 3b1ff6148702beb3818fbae7da2b0206f7d1c7f4..64f2efaf7638f1234d2015e6628d402eba9517ca 100644 (file)
@@ -1347,7 +1347,7 @@ static int at91_can_probe(struct platform_device *pdev)
        priv->reg_base = addr;
        priv->devtype_data = *devtype_data;
        priv->clk = clk;
-       priv->pdata = pdev->dev.platform_data;
+       priv->pdata = dev_get_platdata(&pdev->dev);
        priv->mb0_id = 0x7ff;
 
        netif_napi_add(dev, &priv->napi, at91_poll, get_mb_rx_num(priv));
index a2700d25ff0ed87fb800d4c157aeef911234d26e..8a0b515b33ea57c5804f1c9a82b774be8aadee7f 100644 (file)
@@ -539,7 +539,7 @@ static int bfin_can_probe(struct platform_device *pdev)
        struct resource *res_mem, *rx_irq, *tx_irq, *err_irq;
        unsigned short *pdata;
 
-       pdata = pdev->dev.platform_data;
+       pdata = dev_get_platdata(&pdev->dev);
        if (!pdata) {
                dev_err(&pdev->dev, "No platform data provided!\n");
                err = -EINVAL;
index b374be7891a296bde66696e19d2137a47d7ffea3..bce0be54c2f59587a2498d2f37821f2634b886d9 100644 (file)
@@ -160,7 +160,6 @@ static int c_can_pci_probe(struct pci_dev *pdev,
        return 0;
 
 out_free_c_can:
-       pci_set_drvdata(pdev, NULL);
        free_c_can_dev(dev);
 out_iounmap:
        pci_iounmap(pdev, addr);
@@ -181,7 +180,6 @@ static void c_can_pci_remove(struct pci_dev *pdev)
 
        unregister_c_can_dev(dev);
 
-       pci_set_drvdata(pdev, NULL);
        free_c_can_dev(dev);
 
        pci_iounmap(pdev, priv->base);
index 294ced3cc227520883c6ebe50ac255634d0f0f7c..d66ac265269c68883070f7857b1cd6d928531f0e 100644 (file)
@@ -322,7 +322,7 @@ static struct platform_driver c_can_plat_driver = {
        .driver = {
                .name = KBUILD_MODNAME,
                .owner = THIS_MODULE,
-               .of_match_table = of_match_ptr(c_can_of_table),
+               .of_match_table = c_can_of_table,
        },
        .probe = c_can_plat_probe,
        .remove = c_can_plat_remove,
index 034bdd816a60c74104b00b5b203c69f120f0555d..ad76734b3ecc79556ee4eb59e5c2025a10824b48 100644 (file)
@@ -152,7 +152,7 @@ static int cc770_get_platform_data(struct platform_device *pdev,
                                   struct cc770_priv *priv)
 {
 
-       struct cc770_platform_data *pdata = pdev->dev.platform_data;
+       struct cc770_platform_data *pdata = dev_get_platdata(&pdev->dev);
 
        priv->can.clock.freq = pdata->osc_freq;
        if (priv->cpu_interface & CPUIF_DSC)
@@ -203,7 +203,7 @@ static int cc770_platform_probe(struct platform_device *pdev)
 
        if (pdev->dev.of_node)
                err = cc770_get_of_node_data(pdev, priv);
-       else if (pdev->dev.platform_data)
+       else if (dev_get_platdata(&pdev->dev))
                err = cc770_get_platform_data(pdev, priv);
        else
                err = -ENODEV;
index 71c677e651d7cbead0f673f53a93e89514e74eb3..df010d64ecbb9cd83e27e2ba7cbed2c8b27a49e7 100644 (file)
@@ -702,7 +702,6 @@ static int flexcan_chip_start(struct net_device *dev)
 {
        struct flexcan_priv *priv = netdev_priv(dev);
        struct flexcan_regs __iomem *regs = priv->base;
-       unsigned int i;
        int err;
        u32 reg_mcr, reg_ctrl;
 
@@ -772,17 +771,6 @@ static int flexcan_chip_start(struct net_device *dev)
        netdev_dbg(dev, "%s: writing ctrl=0x%08x", __func__, reg_ctrl);
        flexcan_write(reg_ctrl, &regs->ctrl);
 
-       for (i = 0; i < ARRAY_SIZE(regs->cantxfg); i++) {
-               flexcan_write(0, &regs->cantxfg[i].can_ctrl);
-               flexcan_write(0, &regs->cantxfg[i].can_id);
-               flexcan_write(0, &regs->cantxfg[i].data[0]);
-               flexcan_write(0, &regs->cantxfg[i].data[1]);
-
-               /* put MB into rx queue */
-               flexcan_write(FLEXCAN_MB_CNT_CODE(0x4),
-                       &regs->cantxfg[i].can_ctrl);
-       }
-
        /* acceptance mask/acceptance code (accept everything) */
        flexcan_write(0x0, &regs->rxgmask);
        flexcan_write(0x0, &regs->rx14mask);
@@ -1074,7 +1062,7 @@ static int flexcan_probe(struct platform_device *pdev)
        priv->dev = dev;
        priv->clk_ipg = clk_ipg;
        priv->clk_per = clk_per;
-       priv->pdata = pdev->dev.platform_data;
+       priv->pdata = dev_get_platdata(&pdev->dev);
        priv->devtype_data = devtype_data;
 
        priv->reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver");
index 36bd6fa1c7f3e4760b5f69f65b420f8d55b66a5f..ab5909a7bae9efa378c0466936002460205f5cf1 100644 (file)
@@ -1769,7 +1769,7 @@ static int ican3_probe(struct platform_device *pdev)
        struct device *dev;
        int ret;
 
-       pdata = pdev->dev.platform_data;
+       pdata = dev_get_platdata(&pdev->dev);
        if (!pdata)
                return -ENXIO;
 
index fe7dd696957ea3d8c4f7e34b21baa866bb797b55..08ac401e0214e091bdf8b851990dd8b45e0bec89 100644 (file)
@@ -999,7 +999,7 @@ static int mcp251x_can_probe(struct spi_device *spi)
 {
        struct net_device *net;
        struct mcp251x_priv *priv;
-       struct mcp251x_platform_data *pdata = spi->dev.platform_data;
+       struct mcp251x_platform_data *pdata = dev_get_platdata(&spi->dev);
        int ret = -ENODEV;
 
        if (!pdata)
index 5c314a961970b0041c7776da283979f00c4114f8..5f0e9b3bfa7bb9a8f80f5267058b83c5db53f4c6 100644 (file)
@@ -964,7 +964,6 @@ static void pch_can_remove(struct pci_dev *pdev)
                pci_disable_msi(priv->dev);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
        pch_can_reset(priv);
        pci_iounmap(pdev, priv->regs);
        free_candev(priv->ndev);
index 3752342a678ac5320e68bf69f747b4f69034758d..835921388e7ba6ab4b6ee98306c629691421d8a7 100644 (file)
@@ -207,7 +207,6 @@ static void ems_pci_del_card(struct pci_dev *pdev)
        kfree(card);
 
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
 }
 
 static void ems_pci_card_reset(struct ems_pci_card *card)
index 217585b97cd3e0851439f8b388c040058b668100..087b13bd300e845a6231d80cfbb29ab0d3c8d41f 100644 (file)
@@ -387,7 +387,6 @@ static void kvaser_pci_remove_one(struct pci_dev *pdev)
 
        pci_release_regions(pdev);
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
 }
 
 static struct pci_driver kvaser_pci_driver = {
index 6b6f0ad75090c4ea463ae49b8e2041dbdaca55df..065ca49eb45e72c48c9d1cc5f8fbda0256fdb195 100644 (file)
@@ -744,8 +744,6 @@ static void peak_pci_remove(struct pci_dev *pdev)
        pci_iounmap(pdev, cfg_base);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
-
-       pci_set_drvdata(pdev, NULL);
 }
 
 static struct pci_driver peak_pci_driver = {
index c52c1e96bf90741455eea01add4129fcfc590bad..f9b4f81cd86a4601abc8fbcb5dd7eaf7aaef81a5 100644 (file)
@@ -477,7 +477,6 @@ static void plx_pci_del_card(struct pci_dev *pdev)
        kfree(card);
 
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
 }
 
 /*
index 8e259c541036c575fc181796ebfe21f0c8798a1d..29f9b632118742eea67904bd445922b2773a894f 100644 (file)
@@ -76,7 +76,7 @@ static int sp_probe(struct platform_device *pdev)
        struct resource *res_mem, *res_irq;
        struct sja1000_platform_data *pdata;
 
-       pdata = pdev->dev.platform_data;
+       pdata = dev_get_platdata(&pdev->dev);
        if (!pdata) {
                dev_err(&pdev->dev, "No platform data provided!\n");
                err = -ENODEV;
index 874188ba06f7172fed36b93db05be06ae0e2bd59..25377e547f9b01f49167caa13f510c51fb2bd1c7 100644 (file)
@@ -76,6 +76,10 @@ MODULE_PARM_DESC(maxdev, "Maximum number of slcan interfaces");
 /* maximum rx buffer len: extended CAN frame with timestamp */
 #define SLC_MTU (sizeof("T1111222281122334455667788EA5F\r")+1)
 
+#define SLC_CMD_LEN 1
+#define SLC_SFF_ID_LEN 3
+#define SLC_EFF_ID_LEN 8
+
 struct slcan {
        int                     magic;
 
@@ -142,47 +146,63 @@ static void slc_bump(struct slcan *sl)
 {
        struct sk_buff *skb;
        struct can_frame cf;
-       int i, dlc_pos, tmp;
-       unsigned long ultmp;
-       char cmd = sl->rbuff[0];
-
-       if ((cmd != 't') && (cmd != 'T') && (cmd != 'r') && (cmd != 'R'))
+       int i, tmp;
+       u32 tmpid;
+       char *cmd = sl->rbuff;
+
+       cf.can_id = 0;
+
+       switch (*cmd) {
+       case 'r':
+               cf.can_id = CAN_RTR_FLAG;
+               /* fallthrough */
+       case 't':
+               /* store dlc ASCII value and terminate SFF CAN ID string */
+               cf.can_dlc = sl->rbuff[SLC_CMD_LEN + SLC_SFF_ID_LEN];
+               sl->rbuff[SLC_CMD_LEN + SLC_SFF_ID_LEN] = 0;
+               /* point to payload data behind the dlc */
+               cmd += SLC_CMD_LEN + SLC_SFF_ID_LEN + 1;
+               break;
+       case 'R':
+               cf.can_id = CAN_RTR_FLAG;
+               /* fallthrough */
+       case 'T':
+               cf.can_id |= CAN_EFF_FLAG;
+               /* store dlc ASCII value and terminate EFF CAN ID string */
+               cf.can_dlc = sl->rbuff[SLC_CMD_LEN + SLC_EFF_ID_LEN];
+               sl->rbuff[SLC_CMD_LEN + SLC_EFF_ID_LEN] = 0;
+               /* point to payload data behind the dlc */
+               cmd += SLC_CMD_LEN + SLC_EFF_ID_LEN + 1;
+               break;
+       default:
                return;
+       }
 
-       if (cmd & 0x20) /* tiny chars 'r' 't' => standard frame format */
-               dlc_pos = 4; /* dlc position tiiid */
-       else
-               dlc_pos = 9; /* dlc position Tiiiiiiiid */
-
-       if (!((sl->rbuff[dlc_pos] >= '0') && (sl->rbuff[dlc_pos] < '9')))
+       if (kstrtou32(sl->rbuff + SLC_CMD_LEN, 16, &tmpid))
                return;
 
-       cf.can_dlc = sl->rbuff[dlc_pos] - '0'; /* get can_dlc from ASCII val */
+       cf.can_id |= tmpid;
 
-       sl->rbuff[dlc_pos] = 0; /* terminate can_id string */
-
-       if (kstrtoul(sl->rbuff+1, 16, &ultmp))
+       /* get can_dlc from sanitized ASCII value */
+       if (cf.can_dlc >= '0' && cf.can_dlc < '9')
+               cf.can_dlc -= '0';
+       else
                return;
 
-       cf.can_id = ultmp;
-
-       if (!(cmd & 0x20)) /* NO tiny chars => extended frame format */
-               cf.can_id |= CAN_EFF_FLAG;
-
-       if ((cmd | 0x20) == 'r') /* RTR frame */
-               cf.can_id |= CAN_RTR_FLAG;
-
        *(u64 *) (&cf.data) = 0; /* clear payload */
 
-       for (i = 0, dlc_pos++; i < cf.can_dlc; i++) {
-               tmp = hex_to_bin(sl->rbuff[dlc_pos++]);
-               if (tmp < 0)
-                       return;
-               cf.data[i] = (tmp << 4);
-               tmp = hex_to_bin(sl->rbuff[dlc_pos++]);
-               if (tmp < 0)
-                       return;
-               cf.data[i] |= tmp;
+       /* RTR frames may have a dlc > 0 but they never have any data bytes */
+       if (!(cf.can_id & CAN_RTR_FLAG)) {
+               for (i = 0; i < cf.can_dlc; i++) {
+                       tmp = hex_to_bin(*cmd++);
+                       if (tmp < 0)
+                               return;
+                       cf.data[i] = (tmp << 4);
+                       tmp = hex_to_bin(*cmd++);
+                       if (tmp < 0)
+                               return;
+                       cf.data[i] |= tmp;
+               }
        }
 
        skb = dev_alloc_skb(sizeof(struct can_frame) +
@@ -209,7 +229,6 @@ static void slc_bump(struct slcan *sl)
 /* parse tty input stream */
 static void slcan_unesc(struct slcan *sl, unsigned char s)
 {
-
        if ((s == '\r') || (s == '\a')) { /* CR or BEL ends the pdu */
                if (!test_and_clear_bit(SLF_ERROR, &sl->flags) &&
                    (sl->rcount > 4))  {
@@ -236,27 +255,46 @@ static void slcan_unesc(struct slcan *sl, unsigned char s)
 /* Encapsulate one can_frame and stuff into a TTY queue. */
 static void slc_encaps(struct slcan *sl, struct can_frame *cf)
 {
-       int actual, idx, i;
-       char cmd;
+       int actual, i;
+       unsigned char *pos;
+       unsigned char *endpos;
+       canid_t id = cf->can_id;
+
+       pos = sl->xbuff;
 
        if (cf->can_id & CAN_RTR_FLAG)
-               cmd = 'R'; /* becomes 'r' in standard frame format */
+               *pos = 'R'; /* becomes 'r' in standard frame format (SFF) */
        else
-               cmd = 'T'; /* becomes 't' in standard frame format */
+               *pos = 'T'; /* becomes 't' in standard frame format (SFF) */
 
-       if (cf->can_id & CAN_EFF_FLAG)
-               sprintf(sl->xbuff, "%c%08X%d", cmd,
-                       cf->can_id & CAN_EFF_MASK, cf->can_dlc);
-       else
-               sprintf(sl->xbuff, "%c%03X%d", cmd | 0x20,
-                       cf->can_id & CAN_SFF_MASK, cf->can_dlc);
+       /* determine number of chars for the CAN-identifier */
+       if (cf->can_id & CAN_EFF_FLAG) {
+               id &= CAN_EFF_MASK;
+               endpos = pos + SLC_EFF_ID_LEN;
+       } else {
+               *pos |= 0x20; /* convert R/T to lower case for SFF */
+               id &= CAN_SFF_MASK;
+               endpos = pos + SLC_SFF_ID_LEN;
+       }
 
-       idx = strlen(sl->xbuff);
+       /* build 3 (SFF) or 8 (EFF) digit CAN identifier */
+       pos++;
+       while (endpos >= pos) {
+               *endpos-- = hex_asc_upper[id & 0xf];
+               id >>= 4;
+       }
+
+       pos += (cf->can_id & CAN_EFF_FLAG) ? SLC_EFF_ID_LEN : SLC_SFF_ID_LEN;
 
-       for (i = 0; i < cf->can_dlc; i++)
-               sprintf(&sl->xbuff[idx + 2*i], "%02X", cf->data[i]);
+       *pos++ = cf->can_dlc + '0';
+
+       /* RTR frames may have a dlc > 0 but they never have any data bytes */
+       if (!(cf->can_id & CAN_RTR_FLAG)) {
+               for (i = 0; i < cf->can_dlc; i++)
+                       pos = hex_byte_pack_upper(pos, cf->data[i]);
+       }
 
-       strcat(sl->xbuff, "\r"); /* add terminating character */
+       *pos++ = '\r';
 
        /* Order of next two lines is *very* important.
         * When we are sending a little amount of data,
@@ -267,8 +305,8 @@ static void slc_encaps(struct slcan *sl, struct can_frame *cf)
         *       14 Oct 1994  Dmitry Gorodchanin.
         */
        set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
-       actual = sl->tty->ops->write(sl->tty, sl->xbuff, strlen(sl->xbuff));
-       sl->xleft = strlen(sl->xbuff) - actual;
+       actual = sl->tty->ops->write(sl->tty, sl->xbuff, pos - sl->xbuff);
+       sl->xleft = (pos - sl->xbuff) - actual;
        sl->xhead = sl->xbuff + actual;
        sl->dev->stats.tx_bytes += cf->can_dlc;
 }
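
Both slc_bump() above (receive path) and slc_encaps() here work on the same LAWICEL-style ASCII framing: one command character ('t'/'T' for data frames, 'r'/'R' for RTR; lower case carries an 11-bit SFF identifier, upper case a 29-bit EFF identifier), then SLC_SFF_ID_LEN (3) or SLC_EFF_ID_LEN (8) upper-case hex digits of CAN ID, a single ASCII dlc digit, 2*dlc hex digits of payload, and a terminating '\r'. A few hypothetical example frames (illustration only, not taken from the patch):

/* "t1232AABB\r"         -> SFF id 0x123, dlc 2, data AA BB
 * "T1ABCDEF03DEADBE\r"  -> EFF id 0x1ABCDEF0, dlc 3, data DE AD BE
 * "r1230\r"             -> SFF RTR frame, id 0x123, dlc 0, no data
 */

On the transmit side, hex_asc_upper[] and hex_byte_pack_upper() are the generic kernel hex helpers: hex_byte_pack_upper(pos, byte) emits the two upper-case nibbles of byte and returns the buffer pointer advanced by two.
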
@@ -286,11 +324,13 @@ static void slcan_write_wakeup(struct tty_struct *tty)
        if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev))
                return;
 
+       spin_lock(&sl->lock);
        if (sl->xleft <= 0)  {
                /* Now serial buffer is almost free & we can start
                 * transmission of another packet */
                sl->dev->stats.tx_packets++;
                clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+               spin_unlock(&sl->lock);
                netif_wake_queue(sl->dev);
                return;
        }
@@ -298,6 +338,7 @@ static void slcan_write_wakeup(struct tty_struct *tty)
        actual = tty->ops->write(tty, sl->xhead, sl->xleft);
        sl->xleft -= actual;
        sl->xhead += actual;
+       spin_unlock(&sl->lock);
 }
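
Taking sl->lock in the wakeup handler closes a race with the transmit path: slc_xmit() (not shown in this hunk) calls slc_encaps() under the same lock, and both sides read and update xleft/xhead, so without the lock a wakeup running concurrently with a new transmission could operate on half-updated buffer state. That is a reading of the change; the race window itself is not spelled out in the hunk.
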
 
 /* Send a can_frame to a TTY queue. */
index 65eef1eea2e2434ca47cfeb086095a415d2b385e..6cd5c01b624d592e6fce2c543075822e5751561b 100644 (file)
@@ -768,7 +768,7 @@ static int softing_pdev_remove(struct platform_device *pdev)
 
 static int softing_pdev_probe(struct platform_device *pdev)
 {
-       const struct softing_platform_data *pdat = pdev->dev.platform_data;
+       const struct softing_platform_data *pdat = dev_get_platdata(&pdev->dev);
        struct softing *card;
        struct net_device *netdev;
        struct softing_priv *priv;
index 3a349a22d5bc46eed31bdc32e12d27c25df3bd13..beb5ef834f0fb4de00a703cc4a5db1c79863ecc1 100644 (file)
@@ -894,7 +894,7 @@ static int ti_hecc_probe(struct platform_device *pdev)
        void __iomem *addr;
        int err = -ENODEV;
 
-       pdata = pdev->dev.platform_data;
+       pdata = dev_get_platdata(&pdev->dev);
        if (!pdata) {
                dev_err(&pdev->dev, "No platform data\n");
                goto probe_exit;
index a0f647f92bf55c7388034f9fbd0a377cc1225901..0b7a4c3b01a2976176878607bd457a9ea282e21e 100644 (file)
@@ -463,7 +463,7 @@ static int peak_usb_start(struct peak_usb_device *dev)
        if (i < PCAN_USB_MAX_TX_URBS) {
                if (i == 0) {
                        netdev_err(netdev, "couldn't setup any tx URB\n");
-                       return err;
+                       goto err_tx;
                }
 
                netdev_warn(netdev, "tx performance may be slow\n");
@@ -472,7 +472,7 @@ static int peak_usb_start(struct peak_usb_device *dev)
        if (dev->adapter->dev_start) {
                err = dev->adapter->dev_start(dev);
                if (err)
-                       goto failed;
+                       goto err_adapter;
        }
 
        dev->state |= PCAN_USB_STATE_STARTED;
@@ -481,19 +481,26 @@ static int peak_usb_start(struct peak_usb_device *dev)
        if (dev->adapter->dev_set_bus) {
                err = dev->adapter->dev_set_bus(dev, 1);
                if (err)
-                       goto failed;
+                       goto err_adapter;
        }
 
        dev->can.state = CAN_STATE_ERROR_ACTIVE;
 
        return 0;
 
-failed:
+err_adapter:
        if (err == -ENODEV)
                netif_device_detach(dev->netdev);
 
        netdev_warn(netdev, "couldn't submit control: %d\n", err);
 
+       for (i = 0; i < PCAN_USB_MAX_TX_URBS; i++) {
+               usb_free_urb(dev->tx_contexts[i].urb);
+               dev->tx_contexts[i].urb = NULL;
+       }
+err_tx:
+       usb_kill_anchored_urbs(&dev->rx_submitted);
+
        return err;
 }
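
The reworked error path above follows the standard kernel unwind idiom: each label undoes only what was successfully set up before the failure point, in reverse order of acquisition. A minimal sketch of the pattern (all names below are hypothetical placeholders, not from this driver):

static int example_start(struct example_dev *dev)
{
	int err;

	err = example_setup_rx(dev);	/* first resource */
	if (err)
		return err;		/* nothing to unwind yet */

	err = example_setup_tx(dev);	/* second resource */
	if (err)
		goto err_rx;

	err = example_start_hw(dev);	/* final step */
	if (err)
		goto err_tx;

	return 0;

err_tx:
	example_teardown_tx(dev);	/* undo in reverse order */
err_rx:
	example_teardown_rx(dev);
	return err;
}
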
 
index f00c76377b446cb63999857138e4f0365eccc015..65b735d4a6ad8aa92dc4cea9490f3f14fe9bbab8 100644 (file)
@@ -35,7 +35,7 @@ config EL3
 
 config 3C515
        tristate "3c515 ISA \"Fast EtherLink\""
-       depends on (ISA || EISA) && ISA_DMA_API
+       depends on ISA && ISA_DMA_API
        ---help---
          If you have a 3Com ISA EtherLink XL "Corkscrew" 3c515 Fast Ethernet
          network card, say Y and read the Ethernet-HOWTO, available from
@@ -70,7 +70,7 @@ config VORTEX
        select MII
        ---help---
          This option enables driver support for a large number of 10Mbps and
-         10/100Mbps EISA, PCI and PCMCIA 3Com network cards:
+         10/100Mbps EISA, PCI and Cardbus 3Com network cards:
 
          "Vortex"    (Fast EtherLink 3c590/3c592/3c595/3c597) EISA and PCI
          "Boomerang" (EtherLink XL 3c900 or 3c905)            PCI
index f92f001551dab53152bf276d189e88b1f984e3b5..36fa577970bbae8741254519c0450d497a286d7e 100644 (file)
@@ -702,7 +702,7 @@ static int ax_init_dev(struct net_device *dev)
                        for (i = 0; i < 16; i++)
                                SA_prom[i] = SA_prom[i+i];
 
-               memcpy(dev->dev_addr, SA_prom, 6);
+               memcpy(dev->dev_addr, SA_prom, ETH_ALEN);
        }
 
 #ifdef CONFIG_AX88796_93CX6
index 10ceca523fc0dc9556733d3c1eeded045ab70303..e07ce5ff2d48bf93e2a3daeb102fb4b78d471532 100644 (file)
@@ -586,10 +586,10 @@ static unsigned long __init lance_probe1( struct net_device *dev,
        switch( lp->cardtype ) {
          case OLD_RIEBL:
                /* No ethernet address! (Set some default address) */
-               memcpy( dev->dev_addr, OldRieblDefHwaddr, 6 );
+               memcpy(dev->dev_addr, OldRieblDefHwaddr, ETH_ALEN);
                break;
          case NEW_RIEBL:
-               lp->memcpy_f( dev->dev_addr, RIEBL_HWADDR_ADDR, 6 );
+               lp->memcpy_f(dev->dev_addr, RIEBL_HWADDR_ADDR, ETH_ALEN);
                break;
          case PAM_CARD:
                i = IO->eeprom;
index 91d52b495848a17d4b9bae0de04b7d1f7c798ba9..427c148bb643538c1d2240036a9dd7cad1be9596 100644 (file)
@@ -1138,7 +1138,7 @@ static int au1000_probe(struct platform_device *pdev)
                aup->phy1_search_mac0 = 1;
        } else {
                if (is_valid_ether_addr(pd->mac)) {
-                       memcpy(dev->dev_addr, pd->mac, 6);
+                       memcpy(dev->dev_addr, pd->mac, ETH_ALEN);
                } else {
                        /* Set a random MAC since no valid provided by platform_data. */
                        eth_hw_addr_random(dev);
index 5c728436b85e7df0cf9421fa6185e45c2b52bd3c..256f590f6bb1a6db167f4375b3ed2719d2ad5073 100644 (file)
@@ -754,7 +754,7 @@ lance_open(struct net_device *dev)
        int i;
 
        if (dev->irq == 0 ||
-               request_irq(dev->irq, lance_interrupt, 0, lp->name, dev)) {
+               request_irq(dev->irq, lance_interrupt, 0, dev->name, dev)) {
                return -EAGAIN;
        }
 
index 2d8e28819779eed0388109791fe0126f5af763f8..bd4e6402003ace03374e5941dd5e27801ad2cb3e 100644 (file)
@@ -1675,7 +1675,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
                                pr_cont(" warning: CSR address invalid,\n");
                                pr_info("    using instead PROM address of");
                        }
-                       memcpy(dev->dev_addr, promaddr, 6);
+                       memcpy(dev->dev_addr, promaddr, ETH_ALEN);
                }
        }
 
index a597b766f0809d3b1e1893e9ec008fcdc813405f..daae0e01625360598194c76fcc4239a3283bc85c 100644 (file)
@@ -1220,8 +1220,8 @@ static void bmac_reset_and_enable(struct net_device *dev)
        if (skb != NULL) {
                data = skb_put(skb, ETHERMINPACKET);
                memset(data, 0, ETHERMINPACKET);
-               memcpy(data, dev->dev_addr, 6);
-               memcpy(data+6, dev->dev_addr, 6);
+               memcpy(data, dev->dev_addr, ETH_ALEN);
+               memcpy(data + ETH_ALEN, dev->dev_addr, ETH_ALEN);
                bmac_transmit_packet(skb, dev);
        }
        spin_unlock_irqrestore(&bp->lock, flags);
index 3ef7092e3f1c5ff0fd31cb53a0253a0c7532d711..1cda49a28f7f0e9a672ae775a832eba31ad76fb3 100644 (file)
@@ -153,7 +153,7 @@ static int atl1c_get_permanent_address(struct atl1c_hw *hw)
 bool atl1c_read_eeprom(struct atl1c_hw *hw, u32 offset, u32 *p_value)
 {
        int i;
-       int ret = false;
+       bool ret = false;
        u32 otp_ctrl_data;
        u32 control;
        u32 data;
index 1966444590f6192e862a713216182ab09052ebef..7a73f3a9fcb5e2a5b0ffbac1ed1d0bc0163769cb 100644 (file)
@@ -313,6 +313,34 @@ static void atl1e_set_multi(struct net_device *netdev)
        }
 }
 
+static void __atl1e_rx_mode(netdev_features_t features, u32 *mac_ctrl_data)
+{
+
+       if (features & NETIF_F_RXALL) {
+               /* enable RX of ALL frames */
+               *mac_ctrl_data |= MAC_CTRL_DBG;
+       } else {
+               /* disable RX of ALL frames */
+               *mac_ctrl_data &= ~MAC_CTRL_DBG;
+       }
+}
+
+static void atl1e_rx_mode(struct net_device *netdev,
+       netdev_features_t features)
+{
+       struct atl1e_adapter *adapter = netdev_priv(netdev);
+       u32 mac_ctrl_data = 0;
+
+       netdev_dbg(adapter->netdev, "%s\n", __func__);
+
+       atl1e_irq_disable(adapter);
+       mac_ctrl_data = AT_READ_REG(&adapter->hw, REG_MAC_CTRL);
+       __atl1e_rx_mode(features, &mac_ctrl_data);
+       AT_WRITE_REG(&adapter->hw, REG_MAC_CTRL, mac_ctrl_data);
+       atl1e_irq_enable(adapter);
+}
+
+
 static void __atl1e_vlan_mode(netdev_features_t features, u32 *mac_ctrl_data)
 {
        if (features & NETIF_F_HW_VLAN_CTAG_RX) {
@@ -394,6 +422,10 @@ static int atl1e_set_features(struct net_device *netdev,
        if (changed & NETIF_F_HW_VLAN_CTAG_RX)
                atl1e_vlan_mode(netdev, features);
 
+       if (changed & NETIF_F_RXALL)
+               atl1e_rx_mode(netdev, features);
+
+
        return 0;
 }
 
@@ -1057,7 +1089,8 @@ static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter)
                value |= MAC_CTRL_PROMIS_EN;
        if (netdev->flags & IFF_ALLMULTI)
                value |= MAC_CTRL_MC_ALL_EN;
-
+       if (netdev->features & NETIF_F_RXALL)
+               value |= MAC_CTRL_DBG;
        AT_WRITE_REG(hw, REG_MAC_CTRL, value);
 }
 
@@ -1405,7 +1438,8 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
                        rx_page_desc[que].rx_nxseq++;
 
                        /* error packet */
-                       if (prrs->pkt_flag & RRS_IS_ERR_FRAME) {
+                       if ((prrs->pkt_flag & RRS_IS_ERR_FRAME) &&
+                           !(netdev->features & NETIF_F_RXALL)) {
                                if (prrs->err_flag & (RRS_ERR_BAD_CRC |
                                        RRS_ERR_DRIBBLE | RRS_ERR_CODE |
                                        RRS_ERR_TRUNC)) {
@@ -1418,7 +1452,10 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
                        }
 
                        packet_size = ((prrs->word1 >> RRS_PKT_SIZE_SHIFT) &
-                                       RRS_PKT_SIZE_MASK) - 4; /* CRC */
+                                       RRS_PKT_SIZE_MASK);
+                       if (likely(!(netdev->features & NETIF_F_RXFCS)))
+                               packet_size -= 4; /* CRC */
+
                        skb = netdev_alloc_skb_ip_align(netdev, packet_size);
                        if (skb == NULL)
                                goto skip_pkt;
@@ -2245,7 +2282,8 @@ static int atl1e_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
                              NETIF_F_HW_VLAN_CTAG_RX;
        netdev->features = netdev->hw_features | NETIF_F_LLTX |
                           NETIF_F_HW_VLAN_CTAG_TX;
-
+       /* not enabled by default */
+       netdev->hw_features |= NETIF_F_RXALL | NETIF_F_RXFCS;
        return 0;
 }
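
Since only hw_features is extended here, NETIF_F_RXALL and NETIF_F_RXFCS stay off until the administrator opts in, e.g. with "ethtool -K eth0 rx-all on rx-fcs on" (interface name is a placeholder). With rx-fcs enabled, atl1e_clean_rx_irq() above stops trimming the 4-byte CRC from received frames; with rx-all enabled, frames flagged RRS_IS_ERR_FRAME are delivered instead of being dropped.
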
 
index 9b017d9c58e94e5ab671aeda2cfcc969cc561c6e..079a597fa20cd95216947263115437af86fef0f0 100644 (file)
@@ -596,6 +596,7 @@ static void b44_timer(unsigned long __opaque)
 static void b44_tx(struct b44 *bp)
 {
        u32 cur, cons;
+       unsigned bytes_compl = 0, pkts_compl = 0;
 
        cur  = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
        cur /= sizeof(struct dma_desc);
@@ -612,9 +613,14 @@ static void b44_tx(struct b44 *bp)
                                 skb->len,
                                 DMA_TO_DEVICE);
                rp->skb = NULL;
+
+               bytes_compl += skb->len;
+               pkts_compl++;
+
                dev_kfree_skb_irq(skb);
        }
 
+       netdev_completed_queue(bp->dev, pkts_compl, bytes_compl);
        bp->tx_cons = cons;
        if (netif_queue_stopped(bp->dev) &&
            TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
@@ -1018,6 +1024,8 @@ static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
        if (bp->flags & B44_FLAG_REORDER_BUG)
                br32(bp, B44_DMATX_PTR);
 
+       netdev_sent_queue(dev, skb->len);
+
        if (TX_BUFFS_AVAIL(bp) < 1)
                netif_stop_queue(dev);
 
@@ -1416,6 +1424,8 @@ static void b44_init_hw(struct b44 *bp, int reset_kind)
 
        val = br32(bp, B44_ENET_CTRL);
        bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
+
+       netdev_reset_queue(bp->dev);
 }
 
 static int b44_open(struct net_device *dev)
@@ -2101,7 +2111,7 @@ static int b44_get_invariants(struct b44 *bp)
         * valid PHY address. */
        bp->phy_addr &= 0x1F;
 
-       memcpy(bp->dev->dev_addr, addr, 6);
+       memcpy(bp->dev->dev_addr, addr, ETH_ALEN);
 
        if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){
                pr_err("Invalid MAC address found in EEPROM\n");
index 249468f953651480a5e0b897d582dd09743c6c3b..7eca5a1747337db6f2eed66803e81b081201e38f 100644 (file)
@@ -149,6 +149,8 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
        dma_desc->ctl0 = cpu_to_le32(ctl0);
        dma_desc->ctl1 = cpu_to_le32(ctl1);
 
+       netdev_sent_queue(net_dev, skb->len);
+
        wmb();
 
        /* Increase ring->end to point empty slot. We tell hardware the first
@@ -178,6 +180,7 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
        struct device *dma_dev = bgmac->core->dma_dev;
        int empty_slot;
        bool freed = false;
+       unsigned bytes_compl = 0, pkts_compl = 0;
 
        /* The last slot that hardware didn't consume yet */
        empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
@@ -195,6 +198,9 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
                                         slot->skb->len, DMA_TO_DEVICE);
                        slot->dma_addr = 0;
 
+                       bytes_compl += slot->skb->len;
+                       pkts_compl++;
+
                        /* Free memory! :) */
                        dev_kfree_skb(slot->skb);
                        slot->skb = NULL;
@@ -208,6 +214,8 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
                freed = true;
        }
 
+       netdev_completed_queue(bgmac->net_dev, pkts_compl, bytes_compl);
+
        if (freed && netif_queue_stopped(bgmac->net_dev))
                netif_wake_queue(bgmac->net_dev);
 }
@@ -988,6 +996,8 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
        bgmac_miiconfig(bgmac);
        bgmac_phy_init(bgmac);
 
+       netdev_reset_queue(bgmac->net_dev);
+
        bgmac->int_status = 0;
 }
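
The b44 and bgmac hunks above hook both drivers up to byte queue limits (BQL). The pattern is the same in any driver: report bytes queued when a packet is handed to the hardware, report packets/bytes reclaimed when TX completions are processed, and reset the accounting whenever the TX ring is (re)initialised. A minimal sketch with hypothetical driver names:

static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* ... map the skb, fill a TX descriptor, kick the hardware ... */
	netdev_sent_queue(dev, skb->len);	/* bytes now owned by the NIC */
	return NETDEV_TX_OK;
}

static void foo_tx_complete(struct net_device *dev)
{
	unsigned int pkts = 0, bytes = 0;

	/* ... walk completed descriptors, free skbs, accumulate pkts/bytes ... */
	netdev_completed_queue(dev, pkts, bytes);
}

static void foo_ring_init(struct net_device *dev)
{
	/* ... (re)program the TX descriptor ring ... */
	netdev_reset_queue(dev);	/* keep BQL state in sync with the ring */
}
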
 
index e838a3f74b696dd208680498f3350a3349a65392..61118708fe985c188cd9890e6b62b020629cfbb0 100644 (file)
@@ -5761,8 +5761,8 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
        if (!skb)
                return -ENOMEM;
        packet = skb_put(skb, pkt_size);
-       memcpy(packet, bp->dev->dev_addr, 6);
-       memset(packet + 6, 0x0, 8);
+       memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
+       memset(packet + ETH_ALEN, 0x0, 8);
        for (i = 14; i < pkt_size; i++)
                packet[i] = (unsigned char) (i & 0xff);
 
@@ -8514,7 +8514,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        pci_set_drvdata(pdev, dev);
 
-       memcpy(dev->dev_addr, bp->mac_addr, 6);
+       memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN);
 
        dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
                NETIF_F_TSO | NETIF_F_TSO_ECN |
index 97b3d32a98bd010ab1a1327ef7382e5bd64139b8..8fe4bcb2407d999d9693cbd63820ffe8ec0be607 100644 (file)
@@ -2231,7 +2231,7 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
 #define BNX2X_NUM_TESTS_SF             7
 #define BNX2X_NUM_TESTS_MF             3
 #define BNX2X_NUM_TESTS(bp)            (IS_MF(bp) ? BNX2X_NUM_TESTS_MF : \
-                                                    BNX2X_NUM_TESTS_SF)
+                                            IS_VF(bp) ? 0 : BNX2X_NUM_TESTS_SF)
 
 #define BNX2X_PHY_LOOPBACK             0
 #define BNX2X_MAC_LOOPBACK             1
@@ -2491,11 +2491,5 @@ enum {
 
 #define NUM_MACS       8
 
-enum bnx2x_pci_bus_speed {
-       BNX2X_PCI_LINK_SPEED_2500 = 2500,
-       BNX2X_PCI_LINK_SPEED_5000 = 5000,
-       BNX2X_PCI_LINK_SPEED_8000 = 8000
-};
-
 void bnx2x_set_local_cmng(struct bnx2x *bp);
 #endif /* bnx2x.h */
index 61726af1de6ede3c111d07040e5dec8f4440884a..0c64122aeaffe96eaa1116df01cd98da8a120cb5 100644 (file)
@@ -2481,8 +2481,7 @@ load_error_cnic2:
 load_error_cnic1:
        bnx2x_napi_disable_cnic(bp);
        /* Update the number of queues without the cnic queues */
-       rc = bnx2x_set_real_num_queues(bp, 0);
-       if (rc)
+       if (bnx2x_set_real_num_queues(bp, 0))
                BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
 load_error_cnic0:
        BNX2X_ERR("CNIC-related load failed\n");
@@ -3256,14 +3255,16 @@ static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
        if (prot == IPPROTO_TCP)
                rc |= XMIT_CSUM_TCP;
 
-       if (skb_is_gso_v6(skb)) {
-               rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
-               if (rc & XMIT_CSUM_ENC)
-                       rc |= XMIT_GSO_ENC_V6;
-       } else if (skb_is_gso(skb)) {
-               rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
-               if (rc & XMIT_CSUM_ENC)
-                       rc |= XMIT_GSO_ENC_V4;
+       if (skb_is_gso(skb)) {
+               if (skb_is_gso_v6(skb)) {
+                       rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
+                       if (rc & XMIT_CSUM_ENC)
+                               rc |= XMIT_GSO_ENC_V6;
+               } else {
+                       rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
+                       if (rc & XMIT_CSUM_ENC)
+                               rc |= XMIT_GSO_ENC_V4;
+               }
        }
 
        return rc;
index 324de5f05332e78aa6c108d23891105880ee5bd8..8213cc827aae822307e937fc72bcf6dad2d7fabe 100644 (file)
@@ -639,6 +639,9 @@ static int bnx2x_get_regs_len(struct net_device *dev)
        struct bnx2x *bp = netdev_priv(dev);
        int regdump_len = 0;
 
+       if (IS_VF(bp))
+               return 0;
+
        regdump_len = __bnx2x_get_regs_len(bp);
        regdump_len *= 4;
        regdump_len += sizeof(struct dump_header);
@@ -2900,9 +2903,16 @@ static void bnx2x_self_test(struct net_device *dev,
 
        memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS(bp));
 
+       if (bnx2x_test_nvram(bp) != 0) {
+               if (!IS_MF(bp))
+                       buf[4] = 1;
+               else
+                       buf[0] = 1;
+               etest->flags |= ETH_TEST_FL_FAILED;
+       }
+
        if (!netif_running(dev)) {
-               DP(BNX2X_MSG_ETHTOOL,
-                  "Can't perform self-test when interface is down\n");
+               DP(BNX2X_MSG_ETHTOOL, "Interface is down\n");
                return;
        }
 
@@ -2964,13 +2974,7 @@ static void bnx2x_self_test(struct net_device *dev,
                /* wait until link state is restored */
                bnx2x_wait_for_link(bp, link_up, is_serdes);
        }
-       if (bnx2x_test_nvram(bp) != 0) {
-               if (!IS_MF(bp))
-                       buf[4] = 1;
-               else
-                       buf[0] = 1;
-               etest->flags |= ETH_TEST_FL_FAILED;
-       }
+
        if (bnx2x_test_intr(bp) != 0) {
                if (!IS_MF(bp))
                        buf[5] = 1;
index 32767f6aa33f473a126259e4877fa608ed51b3bf..cf1df8b62e2c2785c0560b77ac5ed4b5fc8ae8b3 100644 (file)
@@ -172,6 +172,7 @@ struct shared_hw_cfg {                       /* NVRAM Offset */
                #define SHARED_HW_CFG_LED_MAC4                       0x000c0000
                #define SHARED_HW_CFG_LED_PHY8                       0x000d0000
                #define SHARED_HW_CFG_LED_EXTPHY1                    0x000e0000
+               #define SHARED_HW_CFG_LED_EXTPHY2                    0x000f0000
 
 
        #define SHARED_HW_CFG_AN_ENABLE_MASK                0x3f000000
index d60a2ea3da192a203a18cda07251c3e2ee60ab42..20dcc02431cac441a8bf3076a7fe6db1abfb04e9 100644 (file)
@@ -175,6 +175,7 @@ typedef int (*read_sfp_module_eeprom_func_p)(struct bnx2x_phy *phy,
 #define EDC_MODE_LINEAR                                0x0022
 #define EDC_MODE_LIMITING                              0x0044
 #define EDC_MODE_PASSIVE_DAC                   0x0055
+#define EDC_MODE_ACTIVE_DAC                    0x0066
 
 /* ETS defines*/
 #define DCBX_INVALID_COS                                       (0xFF)
@@ -3121,7 +3122,7 @@ static void bnx2x_bsc_module_sel(struct link_params *params)
 }
 
 static int bnx2x_bsc_read(struct link_params *params,
-                         struct bnx2x_phy *phy,
+                         struct bnx2x *bp,
                          u8 sl_devid,
                          u16 sl_addr,
                          u8 lc_addr,
@@ -3130,7 +3131,6 @@ static int bnx2x_bsc_read(struct link_params *params,
 {
        u32 val, i;
        int rc = 0;
-       struct bnx2x *bp = params->bp;
 
        if (xfer_cnt > 16) {
                DP(NETIF_MSG_LINK, "invalid xfer_cnt %d. Max is 16 bytes\n",
@@ -3684,6 +3684,41 @@ static void bnx2x_warpcore_enable_AN_KR2(struct bnx2x_phy *phy,
        bnx2x_update_link_attr(params, vars->link_attr_sync);
 }
 
+static void bnx2x_disable_kr2(struct link_params *params,
+                             struct link_vars *vars,
+                             struct bnx2x_phy *phy)
+{
+       struct bnx2x *bp = params->bp;
+       int i;
+       static struct bnx2x_reg_set reg_set[] = {
+               /* Step 1 - Program the TX/RX alignment markers */
+               {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL5, 0x7690},
+               {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL7, 0xe647},
+               {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL6, 0xc4f0},
+               {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL9, 0x7690},
+               {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL11, 0xe647},
+               {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL10, 0xc4f0},
+               {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_USERB0_CTRL, 0x000c},
+               {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL1, 0x6000},
+               {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL3, 0x0000},
+               {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CODE_FIELD, 0x0002},
+               {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI1, 0x0000},
+               {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI2, 0x0af7},
+               {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI3, 0x0af7},
+               {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_BAM_CODE, 0x0002},
+               {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_UD_CODE, 0x0000}
+       };
+       DP(NETIF_MSG_LINK, "Disabling 20G-KR2\n");
+
+       for (i = 0; i < ARRAY_SIZE(reg_set); i++)
+               bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
+                                reg_set[i].val);
+       vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE;
+       bnx2x_update_link_attr(params, vars->link_attr_sync);
+
+       vars->check_kr2_recovery_cnt = CHECK_KR2_RECOVERY_CNT;
+}
+
 static void bnx2x_warpcore_set_lpi_passthrough(struct bnx2x_phy *phy,
                                               struct link_params *params)
 {
@@ -3715,7 +3750,6 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
                                        struct link_params *params,
                                        struct link_vars *vars) {
        u16 lane, i, cl72_ctrl, an_adv = 0;
-       u16 ucode_ver;
        struct bnx2x *bp = params->bp;
        static struct bnx2x_reg_set reg_set[] = {
                {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7},
@@ -3806,15 +3840,7 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
 
        /* Advertise pause */
        bnx2x_ext_phy_set_pause(params, phy, vars);
-       /* Set KR Autoneg Work-Around flag for Warpcore version older than D108
-        */
-       bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
-                       MDIO_WC_REG_UC_INFO_B1_VERSION, &ucode_ver);
-       if (ucode_ver < 0xd108) {
-               DP(NETIF_MSG_LINK, "Enable AN KR work-around. WC ver:0x%x\n",
-                              ucode_ver);
-               vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
-       }
+       vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
        bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
                                 MDIO_WC_REG_DIGITAL5_MISC7, 0x100);
 
@@ -3838,6 +3864,8 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
                bnx2x_set_aer_mmd(params, phy);
 
                bnx2x_warpcore_enable_AN_KR2(phy, params, vars);
+       } else {
+               bnx2x_disable_kr2(params, vars, phy);
        }
 
        /* Enable Autoneg: only on the main lane */
@@ -4347,20 +4375,14 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
        struct bnx2x *bp = params->bp;
        u32 serdes_net_if;
        u16 gp_status1 = 0, lnkup = 0, lnkup_kr = 0;
-       u16 lane = bnx2x_get_warpcore_lane(phy, params);
 
        vars->turn_to_run_wc_rt = vars->turn_to_run_wc_rt ? 0 : 1;
 
        if (!vars->turn_to_run_wc_rt)
                return;
 
-       /* Return if there is no link partner */
-       if (!(bnx2x_warpcore_get_sigdet(phy, params))) {
-               DP(NETIF_MSG_LINK, "bnx2x_warpcore_get_sigdet false\n");
-               return;
-       }
-
        if (vars->rx_tx_asic_rst) {
+               u16 lane = bnx2x_get_warpcore_lane(phy, params);
                serdes_net_if = (REG_RD(bp, params->shmem_base +
                                offsetof(struct shmem_region, dev_info.
                                port_hw_config[params->port].default_cfg)) &
@@ -4375,14 +4397,8 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
                                /*10G KR*/
                        lnkup_kr = (gp_status1 >> (12+lane)) & 0x1;
 
-                       DP(NETIF_MSG_LINK,
-                               "gp_status1 0x%x\n", gp_status1);
-
                        if (lnkup_kr || lnkup) {
-                                       vars->rx_tx_asic_rst = 0;
-                                       DP(NETIF_MSG_LINK,
-                                       "link up, rx_tx_asic_rst 0x%x\n",
-                                       vars->rx_tx_asic_rst);
+                               vars->rx_tx_asic_rst = 0;
                        } else {
                                /* Reset the lane to see if link comes up.*/
                                bnx2x_warpcore_reset_lane(bp, phy, 1);
@@ -4507,10 +4523,14 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
                         * enabled transmitter to avoid current leakage in case
                         * no module is connected
                         */
-                       if (bnx2x_is_sfp_module_plugged(phy, params))
-                               bnx2x_sfp_module_detection(phy, params);
-                       else
-                               bnx2x_sfp_e3_set_transmitter(params, phy, 1);
+                       if ((params->loopback_mode == LOOPBACK_NONE) ||
+                           (params->loopback_mode == LOOPBACK_EXT)) {
+                               if (bnx2x_is_sfp_module_plugged(phy, params))
+                                       bnx2x_sfp_module_detection(phy, params);
+                               else
+                                       bnx2x_sfp_e3_set_transmitter(params,
+                                                                    phy, 1);
+                       }
 
                        bnx2x_warpcore_config_sfi(phy, params);
                        break;
@@ -5757,6 +5777,11 @@ static int bnx2x_warpcore_read_status(struct bnx2x_phy *phy,
        rc = bnx2x_get_link_speed_duplex(phy, params, vars, link_up, gp_speed,
                                         duplex);
 
+       /* In case of KR link down, start up the recovering procedure */
+       if ((!link_up) && (phy->media_type == ETH_PHY_KR) &&
+           (!(phy->flags & FLAGS_WC_DUAL_MODE)))
+               vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
+
        DP(NETIF_MSG_LINK, "duplex %x  flow_ctrl 0x%x link_status 0x%x\n",
                   vars->duplex, vars->flow_ctrl, vars->link_status);
        return rc;
@@ -6345,9 +6370,15 @@ int bnx2x_set_led(struct link_params *params,
                         * intended override.
                         */
                        break;
-               } else
+               } else {
+                       u32 nig_led_mode = ((params->hw_led_mode <<
+                                            SHARED_HW_CFG_LED_MODE_SHIFT) ==
+                                           SHARED_HW_CFG_LED_EXTPHY2) ?
+                               (SHARED_HW_CFG_LED_PHY1 >>
+                                SHARED_HW_CFG_LED_MODE_SHIFT) : hw_led_mode;
                        REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
-                              hw_led_mode);
+                              nig_led_mode);
+               }
 
                REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0);
                /* Set blinking rate to ~15.9Hz */
@@ -6507,6 +6538,11 @@ static int bnx2x_link_initialize(struct link_params *params,
                        params->phy[INT_PHY].config_init(phy, params, vars);
        }
 
+       /* Re-read this value in case it was changed inside config_init due to
+        * limitations of optic module
+        */
+       vars->line_speed = params->phy[INT_PHY].req_line_speed;
+
        /* Init external phy*/
        if (non_ext_phy) {
                if (params->phy[INT_PHY].supported &
@@ -7886,7 +7922,7 @@ static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
                        usleep_range(1000, 2000);
                        bnx2x_warpcore_power_module(params, 1);
                }
-               rc = bnx2x_bsc_read(params, phy, dev_addr, addr32, 0, byte_cnt,
+               rc = bnx2x_bsc_read(params, bp, dev_addr, addr32, 0, byte_cnt,
                                    data_array);
        } while ((rc != 0) && (++cnt < I2C_WA_RETRY_CNT));
 
@@ -8080,7 +8116,10 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
                if (copper_module_type &
                    SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE) {
                        DP(NETIF_MSG_LINK, "Active Copper cable detected\n");
-                       check_limiting_mode = 1;
+                       if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
+                               *edc_mode = EDC_MODE_ACTIVE_DAC;
+                       else
+                               check_limiting_mode = 1;
                } else if (copper_module_type &
                        SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) {
                                DP(NETIF_MSG_LINK,
@@ -8555,6 +8594,7 @@ static void bnx2x_warpcore_set_limiting_mode(struct link_params *params,
                mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT;
                break;
        case EDC_MODE_PASSIVE_DAC:
+       case EDC_MODE_ACTIVE_DAC:
                mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_DAC;
                break;
        default:
@@ -9730,32 +9770,41 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
                         MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL,
                         an_1000_val);
 
-       /* set 100 speed advertisement */
-       if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
-            (phy->speed_cap_mask &
-             (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL |
-              PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))) {
-               an_10_100_val |= (1<<7);
-               /* Enable autoneg and restart autoneg for legacy speeds */
-               autoneg_val |= (1<<9 | 1<<12);
-
-               if (phy->req_duplex == DUPLEX_FULL)
+       /* Set 10/100 speed advertisement */
+       if (phy->req_line_speed == SPEED_AUTO_NEG) {
+               if (phy->speed_cap_mask &
+                   PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) {
+                       /* Enable autoneg and restart autoneg for legacy speeds
+                        */
+                       autoneg_val |= (1<<9 | 1<<12);
                        an_10_100_val |= (1<<8);
-               DP(NETIF_MSG_LINK, "Advertising 100M\n");
-       }
-       /* set 10 speed advertisement */
-       if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
-            (phy->speed_cap_mask &
-             (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL |
-              PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) &&
-            (phy->supported &
-             (SUPPORTED_10baseT_Half |
-              SUPPORTED_10baseT_Full)))) {
-               an_10_100_val |= (1<<5);
-               autoneg_val |= (1<<9 | 1<<12);
-               if (phy->req_duplex == DUPLEX_FULL)
+                       DP(NETIF_MSG_LINK, "Advertising 100M-FD\n");
+               }
+
+               if (phy->speed_cap_mask &
+                   PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) {
+                       /* Enable autoneg and restart autoneg for legacy speeds
+                        */
+                       autoneg_val |= (1<<9 | 1<<12);
+                       an_10_100_val |= (1<<7);
+                       DP(NETIF_MSG_LINK, "Advertising 100M-HD\n");
+               }
+
+               if ((phy->speed_cap_mask &
+                    PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) &&
+                   (phy->supported & SUPPORTED_10baseT_Full)) {
                        an_10_100_val |= (1<<6);
-               DP(NETIF_MSG_LINK, "Advertising 10M\n");
+                       autoneg_val |= (1<<9 | 1<<12);
+                       DP(NETIF_MSG_LINK, "Advertising 10M-FD\n");
+               }
+
+               if ((phy->speed_cap_mask &
+                    PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) &&
+                   (phy->supported & SUPPORTED_10baseT_Half)) {
+                       an_10_100_val |= (1<<5);
+                       autoneg_val |= (1<<9 | 1<<12);
+                       DP(NETIF_MSG_LINK, "Advertising 10M-HD\n");
+               }
        }
 
        /* Only 10/100 are allowed to work in FORCE mode */
@@ -10609,10 +10658,18 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
                                         0x40);
 
                } else {
+                       /* EXTPHY2 LED mode indicates that the 100M/1G/10G LED
+                        * sources are all wired through LED1, rather than only
+                        * 10G in other modes.
+                        */
+                       val = ((params->hw_led_mode <<
+                               SHARED_HW_CFG_LED_MODE_SHIFT) ==
+                              SHARED_HW_CFG_LED_EXTPHY2) ? 0x98 : 0x80;
+
                        bnx2x_cl45_write(bp, phy,
                                         MDIO_PMA_DEVAD,
                                         MDIO_PMA_REG_8481_LED1_MASK,
-                                        0x80);
+                                        val);
 
                        /* Tell LED3 to blink on source */
                        bnx2x_cl45_read(bp, phy,
@@ -13432,43 +13489,6 @@ static void bnx2x_sfp_tx_fault_detection(struct bnx2x_phy *phy,
                }
        }
 }
-static void bnx2x_disable_kr2(struct link_params *params,
-                             struct link_vars *vars,
-                             struct bnx2x_phy *phy)
-{
-       struct bnx2x *bp = params->bp;
-       int i;
-       static struct bnx2x_reg_set reg_set[] = {
-               /* Step 1 - Program the TX/RX alignment markers */
-               {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL5, 0x7690},
-               {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL7, 0xe647},
-               {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL6, 0xc4f0},
-               {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL9, 0x7690},
-               {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL11, 0xe647},
-               {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL10, 0xc4f0},
-               {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_USERB0_CTRL, 0x000c},
-               {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL1, 0x6000},
-               {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL3, 0x0000},
-               {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CODE_FIELD, 0x0002},
-               {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI1, 0x0000},
-               {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI2, 0x0af7},
-               {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI3, 0x0af7},
-               {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_BAM_CODE, 0x0002},
-               {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_UD_CODE, 0x0000}
-       };
-       DP(NETIF_MSG_LINK, "Disabling 20G-KR2\n");
-
-       for (i = 0; i < ARRAY_SIZE(reg_set); i++)
-               bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
-                                reg_set[i].val);
-       vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE;
-       bnx2x_update_link_attr(params, vars->link_attr_sync);
-
-       vars->check_kr2_recovery_cnt = CHECK_KR2_RECOVERY_CNT;
-       /* Restart AN on leading lane */
-       bnx2x_warpcore_restart_AN_KR(phy, params);
-}
-
 static void bnx2x_kr2_recovery(struct link_params *params,
                               struct link_vars *vars,
                               struct bnx2x_phy *phy)
@@ -13546,6 +13566,8 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
                /* Disable KR2 on both lanes */
                DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page, next_page);
                bnx2x_disable_kr2(params, vars, phy);
+               /* Restart AN on leading lane */
+               bnx2x_warpcore_restart_AN_KR(phy, params);
                return;
        }
 }
index a6704b555042dfd26eab956f64082578bcf51a09..13a569460ef27fc55573f437c9197ecbf5fda1c6 100644 (file)
@@ -4703,6 +4703,14 @@ bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print)
        attn.sig[3] = REG_RD(bp,
                MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
                             port*4);
+       /* Since MCP attentions can't be disabled inside the block, we need to
+        * read AEU registers to see whether they're currently disabled
+        */
+       attn.sig[3] &= ((REG_RD(bp,
+                               !port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0
+                                     : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0) &
+                        MISC_AEU_ENABLE_MCP_PRTY_BITS) |
+                       ~MISC_AEU_ENABLE_MCP_PRTY_BITS);
 
        if (!CHIP_IS_E1x(bp))
                attn.sig[4] = REG_RD(bp,
@@ -5447,26 +5455,24 @@ static void bnx2x_timer(unsigned long data)
        if (IS_PF(bp) &&
            !BP_NOMCP(bp)) {
                int mb_idx = BP_FW_MB_IDX(bp);
-               u32 drv_pulse;
-               u32 mcp_pulse;
+               u16 drv_pulse;
+               u16 mcp_pulse;
 
                ++bp->fw_drv_pulse_wr_seq;
                bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
-               /* TBD - add SYSTEM_TIME */
                drv_pulse = bp->fw_drv_pulse_wr_seq;
                bnx2x_drv_pulse(bp);
 
                mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
                             MCP_PULSE_SEQ_MASK);
                /* The delta between driver pulse and mcp response
-                * should be 1 (before mcp response) or 0 (after mcp response)
+                * should not get too big. If the MFW is more than 5 pulses
+                * behind, we should worry about it enough to generate an error
+                * log.
                 */
-               if ((drv_pulse != mcp_pulse) &&
-                   (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
-                       /* someone lost a heartbeat... */
-                       BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
+               if (((drv_pulse - mcp_pulse) & MCP_PULSE_SEQ_MASK) > 5)
+                       BNX2X_ERR("MFW seems hung: drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
                                  drv_pulse, mcp_pulse);
-               }
        }
 
        if (bp->state == BNX2X_STATE_OPEN)
@@ -9873,7 +9879,7 @@ static int bnx2x_prev_path_mark_eeh(struct bnx2x *bp)
 static bool bnx2x_prev_is_path_marked(struct bnx2x *bp)
 {
        struct bnx2x_prev_path_list *tmp_list;
-       int rc = false;
+       bool rc = false;
 
        if (down_trylock(&bnx2x_prev_sem))
                return false;
@@ -11740,7 +11746,7 @@ static int bnx2x_open(struct net_device *dev)
        rc = bnx2x_nic_load(bp, LOAD_OPEN);
        if (rc)
                return rc;
-       return bnx2x_open_epilog(bp);
+       return 0;
 }
 
 /* called with rtnl_lock */
@@ -12274,28 +12280,6 @@ err_out:
        return rc;
 }
 
-static void bnx2x_get_pcie_width_speed(struct bnx2x *bp, int *width,
-                                      enum bnx2x_pci_bus_speed *speed)
-{
-       u32 link_speed, val = 0;
-
-       pci_read_config_dword(bp->pdev, PCICFG_LINK_CONTROL, &val);
-       *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
-
-       link_speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
-
-       switch (link_speed) {
-       case 3:
-               *speed = BNX2X_PCI_LINK_SPEED_8000;
-               break;
-       case 2:
-               *speed = BNX2X_PCI_LINK_SPEED_5000;
-               break;
-       default:
-               *speed = BNX2X_PCI_LINK_SPEED_2500;
-       }
-}
-
 static int bnx2x_check_firmware(struct bnx2x *bp)
 {
        const struct firmware *firmware = bp->firmware;
@@ -12652,8 +12636,8 @@ static int bnx2x_init_one(struct pci_dev *pdev,
 {
        struct net_device *dev = NULL;
        struct bnx2x *bp;
-       int pcie_width;
-       enum bnx2x_pci_bus_speed pcie_speed;
+       enum pcie_link_width pcie_width;
+       enum pci_bus_speed pcie_speed;
        int rc, max_non_def_sbs;
        int rx_count, tx_count, rss_count, doorbell_size;
        int max_cos_est;
@@ -12802,18 +12786,19 @@ static int bnx2x_init_one(struct pci_dev *pdev,
                dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
                rtnl_unlock();
        }
-
-       bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
-       BNX2X_DEV_INFO("got pcie width %d and speed %d\n",
-                      pcie_width, pcie_speed);
-
-       BNX2X_DEV_INFO("%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
+       if (pcie_get_minimum_link(bp->pdev, &pcie_speed, &pcie_width) ||
+           pcie_speed == PCI_SPEED_UNKNOWN ||
+           pcie_width == PCIE_LNK_WIDTH_UNKNOWN)
+               BNX2X_DEV_INFO("Failed to determine PCI Express Bandwidth\n");
+       else
+               BNX2X_DEV_INFO(
+                      "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
                       board_info[ent->driver_data].name,
                       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
                       pcie_width,
-                      pcie_speed == BNX2X_PCI_LINK_SPEED_2500 ? "2.5GHz" :
-                      pcie_speed == BNX2X_PCI_LINK_SPEED_5000 ? "5.0GHz" :
-                      pcie_speed == BNX2X_PCI_LINK_SPEED_8000 ? "8.0GHz" :
+                      pcie_speed == PCIE_SPEED_2_5GT ? "2.5GHz" :
+                      pcie_speed == PCIE_SPEED_5_0GT ? "5.0GHz" :
+                      pcie_speed == PCIE_SPEED_8_0GT ? "8.0GHz" :
                       "Unknown",
                       dev->base_addr, bp->pdev->irq, dev->dev_addr);
 
index 7991f10e1a98f041db4897ad086d2a0172998969..122703d8127e6f3d8c5b8fc7d9736c8e23ea1968 100644 (file)
@@ -1819,7 +1819,7 @@ bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
                fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
                if (fid & IGU_FID_ENCODE_IS_PF)
                        current_pf = fid & IGU_FID_PF_NUM_MASK;
-               else if (current_pf == BP_ABS_FUNC(bp))
+               else if (current_pf == BP_FUNC(bp))
                        bnx2x_vf_set_igu_info(bp, sb_id,
                                              (fid & IGU_FID_VF_NUM_MASK));
                DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
@@ -3180,6 +3180,7 @@ int bnx2x_enable_sriov(struct bnx2x *bp)
                /* set local queue arrays */
                vf->vfqs = &bp->vfdb->vfqs[qcount];
                qcount += vf_sb_count(vf);
+               bnx2x_iov_static_resc(bp, vf);
        }
 
        /* prepare msix vectors in VF configuration space */
@@ -3187,6 +3188,8 @@ int bnx2x_enable_sriov(struct bnx2x *bp)
                bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx));
                REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL,
                       num_vf_queues);
+               DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n",
+                  vf_idx, num_vf_queues);
        }
        bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
 
@@ -3635,29 +3638,6 @@ alloc_mem_err:
        return -ENOMEM;
 }
 
-int bnx2x_open_epilog(struct bnx2x *bp)
-{
-       /* Enable sriov via delayed work. This must be done via delayed work
-        * because it causes the probe of the vf devices to be run, which invoke
-        * register_netdevice which must have rtnl lock taken. As we are holding
-        * the lock right now, that could only work if the probe would not take
-        * the lock. However, as the probe of the vf may be called from other
-        * contexts as well (such as passthrough to vm fails) it can't assume
-        * the lock is being held for it. Using delayed work here allows the
-        * probe code to simply take the lock (i.e. wait for it to be released
-        * if it is being held). We only want to do this if the number of VFs
-        * was set before PF driver was loaded.
-        */
-       if (IS_SRIOV(bp) && BNX2X_NR_VIRTFN(bp)) {
-               smp_mb__before_clear_bit();
-               set_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, &bp->sp_rtnl_state);
-               smp_mb__after_clear_bit();
-               schedule_delayed_work(&bp->sp_rtnl_task, 0);
-       }
-
-       return 0;
-}
-
 void bnx2x_iov_channel_down(struct bnx2x *bp)
 {
        int vf_idx;
index 059f0d460af2a249e631a913ec98da290d78f323..1ff6a9366629ed88fe79a079391c92e95d1e9baf 100644 (file)
@@ -782,7 +782,6 @@ static inline int bnx2x_vf_headroom(struct bnx2x *bp)
 void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp);
 int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs);
 void bnx2x_iov_channel_down(struct bnx2x *bp);
-int bnx2x_open_epilog(struct bnx2x *bp);
 
 #else /* CONFIG_BNX2X_SRIOV */
 
@@ -842,7 +841,6 @@ static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; }
 static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {}
 static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; }
 static inline void bnx2x_iov_channel_down(struct bnx2x *bp) {}
-static inline int bnx2x_open_epilog(struct bnx2x *bp) {return 0; }
 
 #endif /* CONFIG_BNX2X_SRIOV */
 #endif /* bnx2x_sriov.h */
index 6cfb8873245281a74222926b6052838d6df1eca0..da16953eb2ec58012f059273abf153c7e81293b9 100644 (file)
@@ -1765,28 +1765,28 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
                switch (mbx->first_tlv.tl.type) {
                case CHANNEL_TLV_ACQUIRE:
                        bnx2x_vf_mbx_acquire(bp, vf, mbx);
-                       break;
+                       return;
                case CHANNEL_TLV_INIT:
                        bnx2x_vf_mbx_init_vf(bp, vf, mbx);
-                       break;
+                       return;
                case CHANNEL_TLV_SETUP_Q:
                        bnx2x_vf_mbx_setup_q(bp, vf, mbx);
-                       break;
+                       return;
                case CHANNEL_TLV_SET_Q_FILTERS:
                        bnx2x_vf_mbx_set_q_filters(bp, vf, mbx);
-                       break;
+                       return;
                case CHANNEL_TLV_TEARDOWN_Q:
                        bnx2x_vf_mbx_teardown_q(bp, vf, mbx);
-                       break;
+                       return;
                case CHANNEL_TLV_CLOSE:
                        bnx2x_vf_mbx_close_vf(bp, vf, mbx);
-                       break;
+                       return;
                case CHANNEL_TLV_RELEASE:
                        bnx2x_vf_mbx_release_vf(bp, vf, mbx);
-                       break;
+                       return;
                case CHANNEL_TLV_UPDATE_RSS:
                        bnx2x_vf_mbx_update_rss(bp, vf, mbx);
-                       break;
+                       return;
                }
 
        } else {
@@ -1802,26 +1802,24 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
                for (i = 0; i < 20; i++)
                        DP_CONT(BNX2X_MSG_IOV, "%x ",
                                mbx->msg->req.tlv_buf_size.tlv_buffer[i]);
+       }
 
-               /* test whether we can respond to the VF (do we have an address
-                * for it?)
-                */
-               if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) {
-                       /* mbx_resp uses the op_rc of the VF */
-                       vf->op_rc = PFVF_STATUS_NOT_SUPPORTED;
+       /* can we respond to VF (do we have an address for it?) */
+       if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) {
+               /* mbx_resp uses the op_rc of the VF */
+               vf->op_rc = PFVF_STATUS_NOT_SUPPORTED;
 
-                       /* notify the VF that we do not support this request */
-                       bnx2x_vf_mbx_resp(bp, vf);
-               } else {
-                       /* can't send a response since this VF is unknown to us
-                        * just ack the FW to release the mailbox and unlock
-                        * the channel.
-                        */
-                       storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
-                       mmiowb();
-                       bnx2x_unlock_vf_pf_channel(bp, vf,
-                                                  mbx->first_tlv.tl.type);
-               }
+               /* notify the VF that we do not support this request */
+               bnx2x_vf_mbx_resp(bp, vf);
+       } else {
+               /* can't send a response since this VF is unknown to us
+                * just ack the FW to release the mailbox and unlock
+                * the channel.
+                */
+               storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
+               /* Firmware ack should be written before unlocking channel */
+               mmiowb();
+               bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);
        }
 }
 
index 99394bd49a139414776df9ff776c02b2fe6f3c26..f58a8b80302d9b2088ce139688b76e3a9181bb42 100644 (file)
@@ -393,7 +393,7 @@ static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
 
                        csk->vlan_id = path_resp->vlan_id;
 
-                       memcpy(csk->ha, path_resp->mac_addr, 6);
+                       memcpy(csk->ha, path_resp->mac_addr, ETH_ALEN);
                        if (test_bit(SK_F_IPV6, &csk->flags))
                                memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
                                       sizeof(struct in6_addr));
@@ -5572,7 +5572,7 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
        if (cdev->max_fcoe_conn > BNX2X_FCOE_NUM_CONNECTIONS)
                cdev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS;
 
-       memcpy(cdev->mac_addr, ethdev->iscsi_mac, 6);
+       memcpy(cdev->mac_addr, ethdev->iscsi_mac, ETH_ALEN);
 
        cp->cnic_ops = &cnic_bnx2x_ops;
        cp->start_hw = cnic_start_bnx2x_hw;
index 221a1815fd3658f580fbfeb86a556508b03a1e4a..498569e99a1cfe1493a9097079676d737049d909 100644 (file)
@@ -1375,7 +1375,7 @@ static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
 
        spin_lock_bh(&tp->lock);
 
-       if (tg3_readphy(tp, reg, &val))
+       if (__tg3_readphy(tp, mii_id, reg, &val))
                val = -EIO;
 
        spin_unlock_bh(&tp->lock);
@@ -1390,7 +1390,7 @@ static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
 
        spin_lock_bh(&tp->lock);
 
-       if (tg3_writephy(tp, reg, val))
+       if (__tg3_writephy(tp, mii_id, reg, val))
                ret = -EIO;
 
        spin_unlock_bh(&tp->lock);
@@ -1408,7 +1408,7 @@ static void tg3_mdio_config_5785(struct tg3 *tp)
        u32 val;
        struct phy_device *phydev;
 
-       phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+       phydev = tp->mdio_bus->phy_map[tp->phy_addr];
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
@@ -1513,6 +1513,13 @@ static int tg3_mdio_init(struct tg3 *tp)
                                    TG3_CPMU_PHY_STRAP_IS_SERDES;
                if (is_serdes)
                        tp->phy_addr += 7;
+       } else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
+               int addr;
+
+               addr = ssb_gige_get_phyaddr(tp->pdev);
+               if (addr < 0)
+                       return addr;
+               tp->phy_addr = addr;
        } else
                tp->phy_addr = TG3_PHY_MII_ADDR;
 
@@ -1533,7 +1540,7 @@ static int tg3_mdio_init(struct tg3 *tp)
        tp->mdio_bus->read     = &tg3_mdio_read;
        tp->mdio_bus->write    = &tg3_mdio_write;
        tp->mdio_bus->reset    = &tg3_mdio_reset;
-       tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
+       tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
        tp->mdio_bus->irq      = &tp->mdio_irq[0];
 
        for (i = 0; i < PHY_MAX_ADDR; i++)
@@ -1554,7 +1561,7 @@ static int tg3_mdio_init(struct tg3 *tp)
                return i;
        }
 
-       phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+       phydev = tp->mdio_bus->phy_map[tp->phy_addr];
 
        if (!phydev || !phydev->drv) {
                dev_warn(&tp->pdev->dev, "No PHY devices\n");
@@ -1964,7 +1971,7 @@ static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
        u32 old_tx_mode = tp->tx_mode;
 
        if (tg3_flag(tp, USE_PHYLIB))
-               autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
+               autoneg = tp->mdio_bus->phy_map[tp->phy_addr]->autoneg;
        else
                autoneg = tp->link_config.autoneg;
 
@@ -2000,7 +2007,7 @@ static void tg3_adjust_link(struct net_device *dev)
        u8 oldflowctrl, linkmesg = 0;
        u32 mac_mode, lcl_adv, rmt_adv;
        struct tg3 *tp = netdev_priv(dev);
-       struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+       struct phy_device *phydev = tp->mdio_bus->phy_map[tp->phy_addr];
 
        spin_lock_bh(&tp->lock);
 
@@ -2089,7 +2096,7 @@ static int tg3_phy_init(struct tg3 *tp)
        /* Bring the PHY back to a known state. */
        tg3_bmcr_reset(tp);
 
-       phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+       phydev = tp->mdio_bus->phy_map[tp->phy_addr];
 
        /* Attach the MAC to the PHY. */
        phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
@@ -2116,7 +2123,7 @@ static int tg3_phy_init(struct tg3 *tp)
                                      SUPPORTED_Asym_Pause);
                break;
        default:
-               phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
+               phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]);
                return -EINVAL;
        }
 
@@ -2134,7 +2141,7 @@ static void tg3_phy_start(struct tg3 *tp)
        if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
                return;
 
-       phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+       phydev = tp->mdio_bus->phy_map[tp->phy_addr];
 
        if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
                tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
@@ -2154,13 +2161,13 @@ static void tg3_phy_stop(struct tg3 *tp)
        if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
                return;
 
-       phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
+       phy_stop(tp->mdio_bus->phy_map[tp->phy_addr]);
 }
 
 static void tg3_phy_fini(struct tg3 *tp)
 {
        if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
-               phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
+               phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]);
                tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
        }
 }
@@ -4034,7 +4041,7 @@ static int tg3_power_down_prepare(struct tg3 *tp)
                        struct phy_device *phydev;
                        u32 phyid, advertising;
 
-                       phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+                       phydev = tp->mdio_bus->phy_map[tp->phy_addr];
 
                        tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
 
@@ -11922,7 +11929,7 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
                struct phy_device *phydev;
                if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
                        return -EAGAIN;
-               phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+               phydev = tp->mdio_bus->phy_map[tp->phy_addr];
                return phy_ethtool_gset(phydev, cmd);
        }
 
@@ -11989,7 +11996,7 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
                struct phy_device *phydev;
                if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
                        return -EAGAIN;
-               phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+               phydev = tp->mdio_bus->phy_map[tp->phy_addr];
                return phy_ethtool_sset(phydev, cmd);
        }
 
@@ -12144,7 +12151,7 @@ static int tg3_nway_reset(struct net_device *dev)
        if (tg3_flag(tp, USE_PHYLIB)) {
                if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
                        return -EAGAIN;
-               r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
+               r = phy_start_aneg(tp->mdio_bus->phy_map[tp->phy_addr]);
        } else {
                u32 bmcr;
 
@@ -12260,7 +12267,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
                u32 newadv;
                struct phy_device *phydev;
 
-               phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+               phydev = tp->mdio_bus->phy_map[tp->phy_addr];
 
                if (!(phydev->supported & SUPPORTED_Pause) ||
                    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
@@ -13207,8 +13214,8 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
                return -ENOMEM;
 
        tx_data = skb_put(skb, tx_len);
-       memcpy(tx_data, tp->dev->dev_addr, 6);
-       memset(tx_data + 6, 0x0, 8);
+       memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
+       memset(tx_data + ETH_ALEN, 0x0, 8);
 
        tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
 
@@ -13696,7 +13703,7 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                struct phy_device *phydev;
                if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
                        return -EAGAIN;
-               phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+               phydev = tp->mdio_bus->phy_map[tp->phy_addr];
                return phy_mii_ioctl(phydev, ifr, cmd);
        }
 
@@ -16654,8 +16661,8 @@ static int tg3_get_macaddr_sparc(struct tg3 *tp)
        int len;
 
        addr = of_get_property(dp, "local-mac-address", &len);
-       if (addr && len == 6) {
-               memcpy(dev->dev_addr, addr, 6);
+       if (addr && len == ETH_ALEN) {
+               memcpy(dev->dev_addr, addr, ETH_ALEN);
                return 0;
        }
        return -ENODEV;
@@ -16665,7 +16672,7 @@ static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
 {
        struct net_device *dev = tp->dev;
 
-       memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
+       memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
        return 0;
 }
 #endif
@@ -17366,8 +17373,10 @@ static int tg3_init_one(struct pci_dev *pdev,
                        tg3_flag_set(tp, FLUSH_POSTED_WRITES);
                if (ssb_gige_one_dma_at_once(pdev))
                        tg3_flag_set(tp, ONE_DMA_AT_ONCE);
-               if (ssb_gige_have_roboswitch(pdev))
+               if (ssb_gige_have_roboswitch(pdev)) {
+                       tg3_flag_set(tp, USE_PHYLIB);
                        tg3_flag_set(tp, ROBOSWITCH);
+               }
                if (ssb_gige_is_rgmii(pdev))
                        tg3_flag_set(tp, RGMII_MODE);
        }
@@ -17635,7 +17644,7 @@ static int tg3_init_one(struct pci_dev *pdev,
 
        if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
                struct phy_device *phydev;
-               phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+               phydev = tp->mdio_bus->phy_map[tp->phy_addr];
                netdev_info(dev,
                            "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
                            phydev->drv->name, dev_name(&phydev->dev));
index 40c7b93ababc3d39d09e6f0ef9ba8e61af241955..eb33a31b08a01487dc17465ffbc7a4505030b9e2 100644 (file)
@@ -499,7 +499,7 @@ static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac,
 
 static int pm3393_macaddress_get(struct cmac *cmac, u8 mac_addr[6])
 {
-       memcpy(mac_addr, cmac->instance->mac_addr, 6);
+       memcpy(mac_addr, cmac->instance->mac_addr, ETH_ALEN);
        return 0;
 }
 
@@ -526,7 +526,7 @@ static int pm3393_macaddress_set(struct cmac *cmac, u8 ma[6])
         */
 
        /* Store local copy */
-       memcpy(cmac->instance->mac_addr, ma, 6);
+       memcpy(cmac->instance->mac_addr, ma, ETH_ALEN);
 
        lo  = ((u32) ma[1] << 8) | (u32) ma[0];
        mid = ((u32) ma[3] << 8) | (u32) ma[2];
index c73cabdbd4c08a22fd506eef8a219f02833fb4b3..85d0cda5fbfa6823d9d490bacd14deebfdc0039b 100644 (file)
@@ -3983,6 +3983,7 @@ static int cxgb4_inet6addr_handler(struct notifier_block *this,
        struct net_device *event_dev;
        int ret = NOTIFY_DONE;
        struct bonding *bond = netdev_priv(ifa->idev->dev);
+       struct list_head *iter;
        struct slave *slave;
        struct pci_dev *first_pdev = NULL;
 
@@ -3995,7 +3996,7 @@ static int cxgb4_inet6addr_handler(struct notifier_block *this,
                 * in all of them only once.
                 */
                read_lock(&bond->lock);
-               bond_for_each_slave(bond, slave) {
+               bond_for_each_slave(bond, slave, iter) {
                        if (!first_pdev) {
                                ret = clip_add(slave->dev, ifa, event);
                                /* If clip_add is success then only initialize
index 5f5896e522d2c6068d7b6bc9c685cbc279be48d8..be8efeea51f263481b5bcfef84d7874e9b308726 100644 (file)
@@ -1603,7 +1603,7 @@ dm9000_probe(struct platform_device *pdev)
 
        if (!is_valid_ether_addr(ndev->dev_addr) && pdata != NULL) {
                mac_src = "platform data";
-               memcpy(ndev->dev_addr, pdata->dev_addr, 6);
+               memcpy(ndev->dev_addr, pdata->dev_addr, ETH_ALEN);
        }
 
        if (!is_valid_ether_addr(ndev->dev_addr)) {
index 4a0d3b786288704e60cdf3dd63776dd1a0797c51..add6d7a953c6a0ad425a3a0b471698c13b768b11 100644 (file)
@@ -88,7 +88,8 @@ static inline char *nic_name(struct pci_dev *pdev)
 #define BE_MIN_MTU             256
 
 #define BE_NUM_VLANS_SUPPORTED 64
-#define BE_MAX_EQD             96u
+#define BE_UMC_NUM_VLANS_SUPPORTED     15
+#define BE_MAX_EQD             128u
 #define        BE_MAX_TX_FRAG_COUNT    30
 
 #define EVNT_Q_LEN             1024
@@ -200,6 +201,17 @@ struct be_eq_obj {
        struct be_adapter *adapter;
 } ____cacheline_aligned_in_smp;
 
+struct be_aic_obj {            /* Adaptive interrupt coalescing (AIC) info */
+       bool enable;
+       u32 min_eqd;            /* in usecs */
+       u32 max_eqd;            /* in usecs */
+       u32 prev_eqd;           /* in usecs */
+       u32 et_eqd;             /* configured val when aic is off */
+       ulong jiffies;
+       u64 rx_pkts_prev;       /* Used to calculate RX pps */
+       u64 tx_reqs_prev;       /* Used to calculate TX pps */
+};
+
 struct be_mcc_obj {
        struct be_queue_info q;
        struct be_queue_info cq;
@@ -214,6 +226,7 @@ struct be_tx_stats {
        u64 tx_compl;
        ulong tx_jiffies;
        u32 tx_stops;
+       u32 tx_drv_drops;       /* pkts dropped by driver */
        struct u64_stats_sync sync;
        struct u64_stats_sync sync_compl;
 };
@@ -238,15 +251,12 @@ struct be_rx_page_info {
 struct be_rx_stats {
        u64 rx_bytes;
        u64 rx_pkts;
-       u64 rx_pkts_prev;
-       ulong rx_jiffies;
        u32 rx_drops_no_skbs;   /* skb allocation errors */
        u32 rx_drops_no_frags;  /* HW has no fetched frags */
        u32 rx_post_fail;       /* page post alloc failures */
        u32 rx_compl;
        u32 rx_mcast_pkts;
        u32 rx_compl_err;       /* completions with err set */
-       u32 rx_pps;             /* pkts per second */
        struct u64_stats_sync sync;
 };
 
@@ -333,6 +343,7 @@ enum vf_state {
 
 #define BE_FLAGS_LINK_STATUS_INIT              1
 #define BE_FLAGS_WORKER_SCHEDULED              (1 << 3)
+#define BE_FLAGS_VLAN_PROMISC                  (1 << 4)
 #define BE_FLAGS_NAPI_ENABLED                  (1 << 9)
 #define BE_UC_PMAC_COUNT               30
 #define BE_VF_UC_PMAC_COUNT            2
@@ -403,6 +414,7 @@ struct be_adapter {
        u32 big_page_size;      /* Compounded page size shared by rx wrbs */
 
        struct be_drv_stats drv_stats;
+       struct be_aic_obj aic_obj[MAX_EVT_QS];
        u16 vlans_added;
        u8 vlan_tag[VLAN_N_VID];
        u8 vlan_prio_bmap;      /* Available Priority BitMap */
@@ -470,8 +482,8 @@ struct be_adapter {
 
 #define be_physfn(adapter)             (!adapter->virtfn)
 #define        sriov_enabled(adapter)          (adapter->num_vfs > 0)
-#define sriov_want(adapter)             (be_max_vfs(adapter) && num_vfs && \
-                                        be_physfn(adapter))
+#define sriov_want(adapter)             (be_physfn(adapter) && \
+                                        (num_vfs || pci_num_vf(adapter->pdev)))
 #define for_all_vfs(adapter, vf_cfg, i)                                        \
        for (i = 0, vf_cfg = &adapter->vf_cfg[i]; i < adapter->num_vfs; \
                i++, vf_cfg++)
index 1ab5dab11eff07ab349a0cc795ef5986fe1e9010..787bce8c52464e5749991d53970866ef39ccf218 100644 (file)
@@ -180,6 +180,9 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
                        dev_err(&adapter->pdev->dev,
                                "opcode %d-%d failed:status %d-%d\n",
                                opcode, subsystem, compl_status, extd_status);
+
+                       if (extd_status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
+                               return extd_status;
                }
        }
 done:
@@ -1195,7 +1198,6 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
 
        if (lancer_chip(adapter)) {
                req->hdr.version = 1;
-               req->if_id = cpu_to_le16(adapter->if_handle);
        } else if (BEx_chip(adapter)) {
                if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC)
                        req->hdr.version = 2;
@@ -1203,6 +1205,8 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
                req->hdr.version = 2;
        }
 
+       if (req->hdr.version > 0)
+               req->if_id = cpu_to_le16(adapter->if_handle);
        req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
        req->ulp_num = BE_ULP1_NUM;
        req->type = BE_ETH_TX_RING_TYPE_STANDARD;
@@ -1715,11 +1719,12 @@ err:
 /* set the EQ delay interval of an EQ to specified value
  * Uses async mcc
  */
-int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
+int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
+                     int num)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_modify_eq_delay *req;
-       int status = 0;
+       int status = 0, i;
 
        spin_lock_bh(&adapter->mcc_lock);
 
@@ -1733,13 +1738,15 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb, NULL);
 
-       req->num_eq = cpu_to_le32(1);
-       req->delay[0].eq_id = cpu_to_le32(eq_id);
-       req->delay[0].phase = 0;
-       req->delay[0].delay_multiplier = cpu_to_le32(eqd);
+       req->num_eq = cpu_to_le32(num);
+       for (i = 0; i < num; i++) {
+               req->set_eqd[i].eq_id = cpu_to_le32(set_eqd[i].eq_id);
+               req->set_eqd[i].phase = 0;
+               req->set_eqd[i].delay_multiplier =
+                               cpu_to_le32(set_eqd[i].delay_multiplier);
+       }
 
        be_mcc_notify(adapter);
-
 err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
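The MODIFY_EQ_DELAY command above is reworked to take an array of struct be_set_eqd and program several event queues in a single MCC request instead of one request per EQ. A minimal caller sketch, mirroring the reworked be_eqd_update() further down in this series; compute_new_eqd() is a hypothetical stand-in for the adaptive-coalescing logic, and the 65/100 usec-to-multiplier scaling is taken from that later hunk:

static void example_program_eq_delays(struct be_adapter *adapter)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	struct be_eq_obj *eqo;
	int i, num = 0;

	for_all_evt_queues(adapter, eqo, i) {
		/* compute_new_eqd() is a placeholder for the driver's AIC logic */
		int eqd = compute_new_eqd(adapter, eqo);

		set_eqd[num].eq_id = eqo->q.id;
		/* firmware takes a delay multiplier, not usecs: ~65% of eqd */
		set_eqd[num].delay_multiplier = (eqd * 65) / 100;
		num++;
	}

	/* one MCC command covers every EQ that changed */
	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}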
@@ -1812,6 +1819,12 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
        } else if (flags & IFF_ALLMULTI) {
                req->if_flags_mask = req->if_flags =
                                cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
+       } else if (flags & BE_FLAGS_VLAN_PROMISC) {
+               req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_VLAN_PROMISCUOUS);
+
+               if (value == ON)
+                       req->if_flags =
+                               cpu_to_le32(BE_IF_FLAGS_VLAN_PROMISCUOUS);
        } else {
                struct netdev_hw_addr *ha;
                int i = 0;
@@ -3510,7 +3523,7 @@ int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
        struct be_cmd_enable_disable_vf *req;
        int status;
 
-       if (!lancer_chip(adapter))
+       if (BEx_chip(adapter))
                return 0;
 
        spin_lock_bh(&adapter->mcc_lock);
index 84f8c524365549f53719de1b0b379b6368b28292..337ef1f96f0b47ecf228bd7fe4452d419b6c0a19 100644 (file)
@@ -60,6 +60,8 @@ enum {
        MCC_STATUS_NOT_SUPPORTED = 66
 };
 
+#define MCC_ADDL_STS_INSUFFICIENT_RESOURCES    0x16
+
 #define CQE_STATUS_COMPL_MASK          0xFFFF
 #define CQE_STATUS_COMPL_SHIFT         0       /* bits 0 - 15 */
 #define CQE_STATUS_EXTD_MASK           0xFFFF
@@ -1055,14 +1057,16 @@ struct be_cmd_resp_get_flow_control {
 } __packed;
 
 /******************** Modify EQ Delay *******************/
+struct be_set_eqd {
+       u32 eq_id;
+       u32 phase;
+       u32 delay_multiplier;
+};
+
 struct be_cmd_req_modify_eq_delay {
        struct be_cmd_req_hdr hdr;
        u32 num_eq;
-       struct {
-               u32 eq_id;
-               u32 phase;
-               u32 delay_multiplier;
-       } delay[8];
+       struct be_set_eqd set_eqd[MAX_EVT_QS];
 } __packed;
 
 struct be_cmd_resp_modify_eq_delay {
@@ -1791,7 +1795,7 @@ struct be_nic_res_desc {
        u8 acpi_params;
        u8 wol_param;
        u16 rsvd7;
-       u32 rsvd8[3];
+       u32 rsvd8[7];
 } __packed;
 
 struct be_cmd_req_get_func_config {
@@ -1894,8 +1898,7 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
                               struct be_dma_mem *nonemb_cmd);
 int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
                      char *fw_on_flash);
-
-int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd);
+int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *, int num);
 int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
                       u32 num, bool untagged, bool promiscuous);
 int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status);
index b440a1fac77b2883b3417eeab76861cc02ad42b9..3dcf817e756dd5969dd64a68553f3c8f50dedba7 100644 (file)
@@ -155,7 +155,9 @@ static const struct be_ethtool_stat et_tx_stats[] = {
        /* Number of times the TX queue was stopped due to lack
         * of spaces in the TXQ.
         */
-       {DRVSTAT_TX_INFO(tx_stops)}
+       {DRVSTAT_TX_INFO(tx_stops)},
+       /* Pkts dropped in the driver's transmit path */
+       {DRVSTAT_TX_INFO(tx_drv_drops)}
 };
 #define ETHTOOL_TXSTATS_NUM (ARRAY_SIZE(et_tx_stats))
 
@@ -290,19 +292,19 @@ static int be_get_coalesce(struct net_device *netdev,
                           struct ethtool_coalesce *et)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
-       struct be_eq_obj *eqo = &adapter->eq_obj[0];
+       struct be_aic_obj *aic = &adapter->aic_obj[0];
 
 
-       et->rx_coalesce_usecs = eqo->cur_eqd;
-       et->rx_coalesce_usecs_high = eqo->max_eqd;
-       et->rx_coalesce_usecs_low = eqo->min_eqd;
+       et->rx_coalesce_usecs = aic->prev_eqd;
+       et->rx_coalesce_usecs_high = aic->max_eqd;
+       et->rx_coalesce_usecs_low = aic->min_eqd;
 
-       et->tx_coalesce_usecs = eqo->cur_eqd;
-       et->tx_coalesce_usecs_high = eqo->max_eqd;
-       et->tx_coalesce_usecs_low = eqo->min_eqd;
+       et->tx_coalesce_usecs = aic->prev_eqd;
+       et->tx_coalesce_usecs_high = aic->max_eqd;
+       et->tx_coalesce_usecs_low = aic->min_eqd;
 
-       et->use_adaptive_rx_coalesce = eqo->enable_aic;
-       et->use_adaptive_tx_coalesce = eqo->enable_aic;
+       et->use_adaptive_rx_coalesce = aic->enable;
+       et->use_adaptive_tx_coalesce = aic->enable;
 
        return 0;
 }
@@ -314,14 +316,17 @@ static int be_set_coalesce(struct net_device *netdev,
                           struct ethtool_coalesce *et)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
+       struct be_aic_obj *aic = &adapter->aic_obj[0];
        struct be_eq_obj *eqo;
        int i;
 
        for_all_evt_queues(adapter, eqo, i) {
-               eqo->enable_aic = et->use_adaptive_rx_coalesce;
-               eqo->max_eqd = min(et->rx_coalesce_usecs_high, BE_MAX_EQD);
-               eqo->min_eqd = min(et->rx_coalesce_usecs_low, eqo->max_eqd);
-               eqo->eqd = et->rx_coalesce_usecs;
+               aic->enable = et->use_adaptive_rx_coalesce;
+               aic->max_eqd = min(et->rx_coalesce_usecs_high, BE_MAX_EQD);
+               aic->min_eqd = min(et->rx_coalesce_usecs_low, aic->max_eqd);
+               aic->et_eqd = min(et->rx_coalesce_usecs, aic->max_eqd);
+               aic->et_eqd = max(aic->et_eqd, aic->min_eqd);
+               aic++;
        }
 
        return 0;
index 100b528b9bd0f85bf26b778b0d928966d11d419a..9daee2e8db04e30dc03f4b8dac4478c6b7be9617 100644 (file)
@@ -855,11 +855,11 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
        unsigned int eth_hdr_len;
        struct iphdr *ip;
 
-       /* Lancer ASIC has a bug wherein packets that are 32 bytes or less
+       /* Lancer, SH-R ASICs have a bug wherein packets that are 32 bytes or less
         * may cause a transmit stall on that port. So the work-around is to
-        * pad such packets to a 36-byte length.
+        * pad short packets (<= 32 bytes) to a 36-byte length.
         */
-       if (unlikely(lancer_chip(adapter) && skb->len <= 32)) {
+       if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
                if (skb_padto(skb, 36))
                        goto tx_drop;
                skb->len = 36;
@@ -935,8 +935,10 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
        u32 start = txq->head;
 
        skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
-       if (!skb)
+       if (!skb) {
+               tx_stats(txo)->tx_drv_drops++;
                return NETDEV_TX_OK;
+       }
 
        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
 
@@ -965,6 +967,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
                be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
        } else {
                txq->head = start;
+               tx_stats(txo)->tx_drv_drops++;
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
@@ -1013,18 +1016,40 @@ static int be_vid_config(struct be_adapter *adapter)
        status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                    vids, num, 1, 0);
 
-       /* Set to VLAN promisc mode as setting VLAN filter failed */
        if (status) {
-               dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
-               dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
-               goto set_vlan_promisc;
+               /* Set to VLAN promisc mode as setting VLAN filter failed */
+               if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
+                       goto set_vlan_promisc;
+               dev_err(&adapter->pdev->dev,
+                       "Setting HW VLAN filtering failed.\n");
+       } else {
+               if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
+                       /* hw VLAN filtering re-enabled. */
+                       status = be_cmd_rx_filter(adapter,
+                                                 BE_FLAGS_VLAN_PROMISC, OFF);
+                       if (!status) {
+                               dev_info(&adapter->pdev->dev,
+                                        "Disabling VLAN Promiscuous mode.\n");
+                               adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
+                               dev_info(&adapter->pdev->dev,
+                                        "Re-Enabling HW VLAN filtering\n");
+                       }
+               }
        }
 
        return status;
 
 set_vlan_promisc:
-       status = be_cmd_vlan_config(adapter, adapter->if_handle,
-                                   NULL, 0, 1, 1);
+       dev_warn(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
+
+       status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
+       if (!status) {
+               dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
+               dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering\n");
+               adapter->flags |= BE_FLAGS_VLAN_PROMISC;
+       } else
+               dev_err(&adapter->pdev->dev,
+                       "Failed to enable VLAN Promiscuous mode.\n");
        return status;
 }
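The reworked be_vid_config() above only falls back to VLAN-promiscuous RX filtering when the firmware reports it has run out of HW VLAN filters, and it drops back out of promiscuous mode once a later programming attempt succeeds. A condensed, hedged sketch of that decision, using the constants and flag names from the hunks above (error-logging branches omitted):

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 1, 0);
	if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES) {
		/* out of HW VLAN filters: accept all VLANs at the RX filter */
		if (!be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON))
			adapter->flags |= BE_FLAGS_VLAN_PROMISC;
	} else if (!status && (adapter->flags & BE_FLAGS_VLAN_PROMISC)) {
		/* HW filtering works again: re-enable it, leave promisc mode */
		if (!be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, OFF))
			adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
	}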
 
@@ -1033,10 +1058,6 @@ static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;
 
-       if (!lancer_chip(adapter) && !be_physfn(adapter)) {
-               status = -EINVAL;
-               goto ret;
-       }
 
        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
@@ -1059,11 +1080,6 @@ static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;
 
-       if (!lancer_chip(adapter) && !be_physfn(adapter)) {
-               status = -EINVAL;
-               goto ret;
-       }
-
        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                goto ret;
@@ -1188,8 +1204,8 @@ static int be_get_vf_config(struct net_device *netdev, int vf,
 
        vi->vf = vf;
        vi->tx_rate = vf_cfg->tx_rate;
-       vi->vlan = vf_cfg->vlan_tag;
-       vi->qos = 0;
+       vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
+       vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
        memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
 
        return 0;
@@ -1199,28 +1215,29 @@ static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
+       struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        int status = 0;
 
        if (!sriov_enabled(adapter))
                return -EPERM;
 
-       if (vf >= adapter->num_vfs || vlan > 4095)
+       if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
                return -EINVAL;
 
-       if (vlan) {
-               if (adapter->vf_cfg[vf].vlan_tag != vlan) {
+       if (vlan || qos) {
+               vlan |= qos << VLAN_PRIO_SHIFT;
+               if (vf_cfg->vlan_tag != vlan) {
                        /* If this is new value, program it. Else skip. */
-                       adapter->vf_cfg[vf].vlan_tag = vlan;
-
-                       status = be_cmd_set_hsw_config(adapter, vlan,
-                               vf + 1, adapter->vf_cfg[vf].if_handle, 0);
+                       vf_cfg->vlan_tag = vlan;
+                       status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
+                                                      vf_cfg->if_handle, 0);
                }
        } else {
                /* Reset Transparent Vlan Tagging. */
-               adapter->vf_cfg[vf].vlan_tag = 0;
-               vlan = adapter->vf_cfg[vf].def_vid;
+               vf_cfg->vlan_tag = 0;
+               vlan = vf_cfg->def_vid;
                status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
-                       adapter->vf_cfg[vf].if_handle, 0);
+                                              vf_cfg->if_handle, 0);
        }
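be_set_vf_vlan() now folds the 802.1p priority into the stored tag with qos << VLAN_PRIO_SHIFT, and be_get_vf_config() above splits it back out with VLAN_VID_MASK and a right shift. A small standalone example of the arithmetic, assuming the usual <linux/if_vlan.h> values (VLAN_PRIO_SHIFT = 13, VLAN_VID_MASK = 0x0fff):

#include <stdio.h>
#include <stdint.h>

#define VLAN_PRIO_SHIFT 13
#define VLAN_VID_MASK   0x0fff

int main(void)
{
	uint16_t vid = 100, qos = 5;
	/* what be_set_vf_vlan stores in vf_cfg->vlan_tag */
	uint16_t tag = vid | (qos << VLAN_PRIO_SHIFT);

	/* what be_get_vf_config reports back as vlan and qos */
	printf("tag=0x%04x vid=%u qos=%u\n",
	       (unsigned)tag,
	       (unsigned)(tag & VLAN_VID_MASK),
	       (unsigned)(tag >> VLAN_PRIO_SHIFT));
	return 0;	/* prints: tag=0xa064 vid=100 qos=5 */
}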
 
 
@@ -1261,53 +1278,79 @@ static int be_set_vf_tx_rate(struct net_device *netdev,
        return status;
 }
 
-static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
+static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
+                         ulong now)
 {
-       struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
-       ulong now = jiffies;
-       ulong delta = now - stats->rx_jiffies;
-       u64 pkts;
-       unsigned int start, eqd;
+       aic->rx_pkts_prev = rx_pkts;
+       aic->tx_reqs_prev = tx_pkts;
+       aic->jiffies = now;
+}
 
-       if (!eqo->enable_aic) {
-               eqd = eqo->eqd;
-               goto modify_eqd;
-       }
+static void be_eqd_update(struct be_adapter *adapter)
+{
+       struct be_set_eqd set_eqd[MAX_EVT_QS];
+       int eqd, i, num = 0, start;
+       struct be_aic_obj *aic;
+       struct be_eq_obj *eqo;
+       struct be_rx_obj *rxo;
+       struct be_tx_obj *txo;
+       u64 rx_pkts, tx_pkts;
+       ulong now;
+       u32 pps, delta;
 
-       if (eqo->idx >= adapter->num_rx_qs)
-               return;
+       for_all_evt_queues(adapter, eqo, i) {
+               aic = &adapter->aic_obj[eqo->idx];
+               if (!aic->enable) {
+                       if (aic->jiffies)
+                               aic->jiffies = 0;
+                       eqd = aic->et_eqd;
+                       goto modify_eqd;
+               }
 
-       stats = rx_stats(&adapter->rx_obj[eqo->idx]);
+               rxo = &adapter->rx_obj[eqo->idx];
+               do {
+                       start = u64_stats_fetch_begin_bh(&rxo->stats.sync);
+                       rx_pkts = rxo->stats.rx_pkts;
+               } while (u64_stats_fetch_retry_bh(&rxo->stats.sync, start));
 
-       /* Wrapped around */
-       if (time_before(now, stats->rx_jiffies)) {
-               stats->rx_jiffies = now;
-               return;
-       }
+               txo = &adapter->tx_obj[eqo->idx];
+               do {
+                       start = u64_stats_fetch_begin_bh(&txo->stats.sync);
+                       tx_pkts = txo->stats.tx_reqs;
+               } while (u64_stats_fetch_retry_bh(&txo->stats.sync, start));
 
-       /* Update once a second */
-       if (delta < HZ)
-               return;
 
-       do {
-               start = u64_stats_fetch_begin_bh(&stats->sync);
-               pkts = stats->rx_pkts;
-       } while (u64_stats_fetch_retry_bh(&stats->sync, start));
-
-       stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
-       stats->rx_pkts_prev = pkts;
-       stats->rx_jiffies = now;
-       eqd = (stats->rx_pps / 110000) << 3;
-       eqd = min(eqd, eqo->max_eqd);
-       eqd = max(eqd, eqo->min_eqd);
-       if (eqd < 10)
-               eqd = 0;
+               /* Skip if we wrapped around or this is the first calculation */
+               now = jiffies;
+               if (!aic->jiffies || time_before(now, aic->jiffies) ||
+                   rx_pkts < aic->rx_pkts_prev ||
+                   tx_pkts < aic->tx_reqs_prev) {
+                       be_aic_update(aic, rx_pkts, tx_pkts, now);
+                       continue;
+               }
+
+               delta = jiffies_to_msecs(now - aic->jiffies);
+               pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
+                       (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
+               eqd = (pps / 15000) << 2;
 
+               if (eqd < 8)
+                       eqd = 0;
+               eqd = min_t(u32, eqd, aic->max_eqd);
+               eqd = max_t(u32, eqd, aic->min_eqd);
+
+               be_aic_update(aic, rx_pkts, tx_pkts, now);
 modify_eqd:
-       if (eqd != eqo->cur_eqd) {
-               be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
-               eqo->cur_eqd = eqd;
+               if (eqd != aic->prev_eqd) {
+                       set_eqd[num].delay_multiplier = (eqd * 65)/100;
+                       set_eqd[num].eq_id = eqo->q.id;
+                       aic->prev_eqd = eqd;
+                       num++;
+               }
        }
+
+       if (num)
+               be_cmd_modify_eqd(adapter, set_eqd, num);
 }
 
 static void be_rx_stats_update(struct be_rx_obj *rxo,
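The adaptive coalescing above turns a combined RX+TX packet rate into an EQ delay: eqd = (pps / 15000) << 2, zeroed below 8, clamped to [min_eqd, max_eqd], then scaled by 65/100 into the firmware's delay multiplier. A small standalone walk-through of that arithmetic with made-up traffic numbers; the 15000 and 65/100 constants come from the hunk above and BE_MAX_EQD from the be.h hunk:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* pretend one second elapsed and these packet counts were seen */
	uint64_t rx_pkts = 200000, tx_pkts = 100000;
	uint32_t delta_ms = 1000, min_eqd = 0, max_eqd = 128;	/* BE_MAX_EQD */

	uint32_t pps = (uint32_t)(rx_pkts * 1000 / delta_ms) +
		       (uint32_t)(tx_pkts * 1000 / delta_ms);
	uint32_t eqd = (pps / 15000) << 2;

	if (eqd < 8)
		eqd = 0;
	if (eqd > max_eqd)
		eqd = max_eqd;
	if (eqd < min_eqd)
		eqd = min_eqd;

	/* the multiplier is what be_cmd_modify_eqd() actually programs */
	printf("pps=%u eqd=%u usec, delay_multiplier=%u\n",
	       pps, eqd, (eqd * 65) / 100);
	return 0;	/* prints: pps=300000 eqd=80 usec, delay_multiplier=52 */
}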
@@ -1924,6 +1967,7 @@ static int be_evt_queues_create(struct be_adapter *adapter)
 {
        struct be_queue_info *eq;
        struct be_eq_obj *eqo;
+       struct be_aic_obj *aic;
        int i, rc;
 
        adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
@@ -1932,11 +1976,12 @@ static int be_evt_queues_create(struct be_adapter *adapter)
        for_all_evt_queues(adapter, eqo, i) {
                netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
                               BE_NAPI_WEIGHT);
+               aic = &adapter->aic_obj[i];
                eqo->adapter = adapter;
                eqo->tx_budget = BE_TX_BUDGET;
                eqo->idx = i;
-               eqo->max_eqd = BE_MAX_EQD;
-               eqo->enable_aic = true;
+               aic->max_eqd = BE_MAX_EQD;
+               aic->enable = true;
 
                eq = &eqo->q;
                rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
@@ -2923,7 +2968,8 @@ static int be_vf_setup(struct be_adapter *adapter)
                        goto err;
                vf_cfg->def_vid = def_vlan;
 
-               be_cmd_enable_vf(adapter, vf + 1);
+               if (!old_vfs)
+                       be_cmd_enable_vf(adapter, vf + 1);
        }
 
        if (!old_vfs) {
@@ -2948,12 +2994,12 @@ static void BEx_get_resources(struct be_adapter *adapter,
        struct pci_dev *pdev = adapter->pdev;
        bool use_sriov = false;
 
-       if (BE3_chip(adapter) && be_physfn(adapter)) {
+       if (BE3_chip(adapter) && sriov_want(adapter)) {
                int max_vfs;
 
                max_vfs = pci_sriov_get_totalvfs(pdev);
                res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
-               use_sriov = res->max_vfs && num_vfs;
+               use_sriov = res->max_vfs;
        }
 
        if (be_physfn(adapter))
@@ -2963,12 +3009,15 @@ static void BEx_get_resources(struct be_adapter *adapter,
 
        if (adapter->function_mode & FLEX10_MODE)
                res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
+       else if (adapter->function_mode & UMC_ENABLED)
+               res->max_vlans = BE_UMC_NUM_VLANS_SUPPORTED;
        else
                res->max_vlans = BE_NUM_VLANS_SUPPORTED;
        res->max_mcast_mac = BE_MAX_MC;
 
+       /* For BE3 1Gb ports, F/W does not properly support multiple TXQs */
        if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) ||
-           !be_physfn(adapter))
+           !be_physfn(adapter) || (adapter->port_num > 1))
                res->max_tx_qs = 1;
        else
                res->max_tx_qs = BE3_MAX_TX_QS;
@@ -3010,14 +3059,6 @@ static int be_get_resources(struct be_adapter *adapter)
                adapter->res = res;
        }
 
-       /* For BE3 only check if FW suggests a different max-txqs value */
-       if (BE3_chip(adapter)) {
-               status = be_cmd_get_profile_config(adapter, &res, 0);
-               if (!status && res.max_tx_qs)
-                       adapter->res.max_tx_qs =
-                               min(adapter->res.max_tx_qs, res.max_tx_qs);
-       }
-
        /* For Lancer, SH etc read per-function resource limits from FW.
         * GET_FUNC_CONFIG returns per function guaranteed limits.
         * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
@@ -3242,7 +3283,7 @@ static int be_setup(struct be_adapter *adapter)
                be_cmd_set_flow_control(adapter, adapter->tx_fc,
                                        adapter->rx_fc);
 
-       if (be_physfn(adapter) && num_vfs) {
+       if (sriov_want(adapter)) {
                if (be_max_vfs(adapter))
                        be_vf_setup(adapter);
                else
@@ -4246,7 +4287,6 @@ static void be_worker(struct work_struct *work)
        struct be_adapter *adapter =
                container_of(work, struct be_adapter, work.work);
        struct be_rx_obj *rxo;
-       struct be_eq_obj *eqo;
        int i;
 
        /* when interrupts are not yet enabled, just reap any pending
@@ -4277,8 +4317,7 @@ static void be_worker(struct work_struct *work)
                }
        }
 
-       for_all_evt_queues(adapter, eqo, i)
-               be_eqd_update(adapter, eqo);
+       be_eqd_update(adapter);
 
 reschedule:
        adapter->work_counter++;
index 6b60582ce8cf1c7a82dd3da1d69acb64981014f4..56f2f608a9f43aa27a32b038d21daf83ffe7f682 100644 (file)
@@ -1083,7 +1083,7 @@ static int fs_enet_probe(struct platform_device *ofdev)
 
        mac_addr = of_get_mac_address(ofdev->dev.of_node);
        if (mac_addr)
-               memcpy(ndev->dev_addr, mac_addr, 6);
+               memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
 
        ret = fep->ops->allocate_bd(ndev);
        if (ret)
index 098f133908ae016058f326225e2ed58b9709d592..e006a09ba8990050f3bc9a91e101ae8455419a87 100644 (file)
@@ -452,7 +452,9 @@ static int gianfar_ptp_probe(struct platform_device *dev)
        err = -ENODEV;
 
        etsects->caps = ptp_gianfar_caps;
-       etsects->cksel = DEFAULT_CKSEL;
+
+       if (get_of_u32(node, "fsl,cksel", &etsects->cksel))
+               etsects->cksel = DEFAULT_CKSEL;
 
        if (get_of_u32(node, "fsl,tclk-period", &etsects->tclk_period) ||
            get_of_u32(node, "fsl,tmr-prsc", &etsects->tmr_prsc) ||
index 5930c39672db25eee560dabd16cf38d1ca54636b..d58a3dfc95c296086f3d729819e42bc758ef5f6f 100644 (file)
@@ -3899,7 +3899,7 @@ static int ucc_geth_probe(struct platform_device* ofdev)
 
        mac_addr = of_get_mac_address(np);
        if (mac_addr)
-               memcpy(dev->dev_addr, mac_addr, 6);
+               memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
 
        ugeth->ug_info = ug_info;
        ugeth->dev = device;
index 91227d03274e02685d5e96fe6096e56e6af999eb..37860096f744005a17c7dc805f890e396ac8933e 100644 (file)
@@ -1098,7 +1098,7 @@ static int hp100_open(struct net_device *dev)
        if (request_irq(dev->irq, hp100_interrupt,
                        lp->bus == HP100_BUS_PCI || lp->bus ==
                        HP100_BUS_EISA ? IRQF_SHARED : 0,
-                       "hp100", dev)) {
+                       dev->name, dev)) {
                printk("hp100: %s: unable to get IRQ %d\n", dev->name, dev->irq);
                return -EAGAIN;
        }
index e38816145395c9e2b08416934e0b10b6352b4507..a15877affc9bd6b5791e1df9fa0e8ec2c2905c0d 100644 (file)
@@ -711,7 +711,7 @@ static int init_i596_mem(struct net_device *dev)
        i596_add_cmd(dev, &lp->cf_cmd.cmd);
 
        DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name));
-       memcpy(lp->sa_cmd.eth_addr, dev->dev_addr, 6);
+       memcpy(lp->sa_cmd.eth_addr, dev->dev_addr, ETH_ALEN);
        lp->sa_cmd.cmd.command = CmdSASetup;
        i596_add_cmd(dev, &lp->sa_cmd.cmd);
 
@@ -1155,7 +1155,7 @@ struct net_device * __init i82596_probe(int unit)
                        err = -ENODEV;
                        goto out;
                }
-               memcpy(eth_addr, (void *) 0xfffc1f2c, 6);       /* YUCK! Get addr from NOVRAM */
+               memcpy(eth_addr, (void *) 0xfffc1f2c, ETH_ALEN);        /* YUCK! Get addr from NOVRAM */
                dev->base_addr = MVME_I596_BASE;
                dev->irq = (unsigned) MVME16x_IRQ_I596;
                goto found;
index d653bac4cfc4e2be49d1ab1ff9a1247eae5460aa..861fa15e1e81b11e31f0d8a2f03907056689ce9d 100644 (file)
@@ -607,7 +607,7 @@ static int init_i596_mem(struct net_device *dev)
        i596_add_cmd(dev, &dma->cf_cmd.cmd);
 
        DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name));
-       memcpy(dma->sa_cmd.eth_addr, dev->dev_addr, 6);
+       memcpy(dma->sa_cmd.eth_addr, dev->dev_addr, ETH_ALEN);
        dma->sa_cmd.cmd.command = SWAP16(CmdSASetup);
        DMA_WBACK(dev, &(dma->sa_cmd), sizeof(struct sa_cmd));
        i596_add_cmd(dev, &dma->sa_cmd.cmd);
@@ -1396,13 +1396,13 @@ static void set_multicast_list(struct net_device *dev)
                netdev_for_each_mc_addr(ha, dev) {
                        if (!cnt--)
                                break;
-                       memcpy(cp, ha->addr, 6);
+                       memcpy(cp, ha->addr, ETH_ALEN);
                        if (i596_debug > 1)
                                DEB(DEB_MULTI,
                                    printk(KERN_DEBUG
                                           "%s: Adding address %pM\n",
                                           dev->name, cp));
-                       cp += 6;
+                       cp += ETH_ALEN;
                }
                DMA_WBACK_INV(dev, &dma->mc_cmd, sizeof(struct mc_cmd));
                i596_add_cmd(dev, &cmd->cmd);
index 6b5c7222342c5e96144febc4a55bd7fb7dfc137e..ef21a2e10180c516a5ee3935b15af5fb52b506ab 100644 (file)
@@ -2676,7 +2676,7 @@ static int emac_init_config(struct emac_instance *dev)
                       np->full_name);
                return -ENXIO;
        }
-       memcpy(dev->ndev->dev_addr, p, 6);
+       memcpy(dev->ndev->dev_addr, p, ETH_ALEN);
 
        /* IAHT and GAHT filter parameterization */
        if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
index 59a92d5870b5535826a6fec333f27d1796e062db..9c45efe4c8fecfdc0e16371ee67a622e4bfcbdbe 100644 (file)
 struct emac_instance;
 struct mal_instance;
 
-extern void emac_dbg_register(struct emac_instance *dev);
-extern void emac_dbg_unregister(struct emac_instance *dev);
-extern void mal_dbg_register(struct mal_instance *mal);
-extern void mal_dbg_unregister(struct mal_instance *mal);
-extern int emac_init_debug(void) __init;
-extern void emac_fini_debug(void) __exit;
-extern void emac_dbg_dump_all(void);
+void emac_dbg_register(struct emac_instance *dev);
+void emac_dbg_unregister(struct emac_instance *dev);
+void mal_dbg_register(struct mal_instance *mal);
+void mal_dbg_unregister(struct mal_instance *mal);
+int emac_init_debug(void) __init;
+void emac_fini_debug(void) __exit;
+void emac_dbg_dump_all(void);
 
 # define DBG_LEVEL             1
 
index 668bceeff4a2112eb98eece3405c9594dd8accef..d4f1374d19000ea0562d9c4c738e1ea56caffb4b 100644 (file)
@@ -56,15 +56,15 @@ struct rgmii_instance {
 
 #ifdef CONFIG_IBM_EMAC_RGMII
 
-extern int rgmii_init(void);
-extern void rgmii_exit(void);
-extern int rgmii_attach(struct platform_device *ofdev, int input, int mode);
-extern void rgmii_detach(struct platform_device *ofdev, int input);
-extern void rgmii_get_mdio(struct platform_device *ofdev, int input);
-extern void rgmii_put_mdio(struct platform_device *ofdev, int input);
-extern void rgmii_set_speed(struct platform_device *ofdev, int input, int speed);
-extern int rgmii_get_regs_len(struct platform_device *ofdev);
-extern void *rgmii_dump_regs(struct platform_device *ofdev, void *buf);
+int rgmii_init(void);
+void rgmii_exit(void);
+int rgmii_attach(struct platform_device *ofdev, int input, int mode);
+void rgmii_detach(struct platform_device *ofdev, int input);
+void rgmii_get_mdio(struct platform_device *ofdev, int input);
+void rgmii_put_mdio(struct platform_device *ofdev, int input);
+void rgmii_set_speed(struct platform_device *ofdev, int input, int speed);
+int rgmii_get_regs_len(struct platform_device *ofdev);
+void *rgmii_dump_regs(struct platform_device *ofdev, void *buf);
 
 #else
 
index 350b7096a041a310f620911f7f2e448bc7d4e8ca..4d5f336f07b3669c15c916ea79ce8e90bc9efb47 100644 (file)
@@ -72,13 +72,13 @@ struct tah_instance {
 
 #ifdef CONFIG_IBM_EMAC_TAH
 
-extern int tah_init(void);
-extern void tah_exit(void);
-extern int tah_attach(struct platform_device *ofdev, int channel);
-extern void tah_detach(struct platform_device *ofdev, int channel);
-extern void tah_reset(struct platform_device *ofdev);
-extern int tah_get_regs_len(struct platform_device *ofdev);
-extern void *tah_dump_regs(struct platform_device *ofdev, void *buf);
+int tah_init(void);
+void tah_exit(void);
+int tah_attach(struct platform_device *ofdev, int channel);
+void tah_detach(struct platform_device *ofdev, int channel);
+void tah_reset(struct platform_device *ofdev);
+int tah_get_regs_len(struct platform_device *ofdev);
+void *tah_dump_regs(struct platform_device *ofdev, void *buf);
 
 #else
 
index 455bfb0854934d684164d5105faa45ae89df03bb..0959c55b14591dc7f2d58b1946585eeb0da04d68 100644 (file)
@@ -53,15 +53,15 @@ struct zmii_instance {
 
 #ifdef CONFIG_IBM_EMAC_ZMII
 
-extern int zmii_init(void);
-extern void zmii_exit(void);
-extern int zmii_attach(struct platform_device *ofdev, int input, int *mode);
-extern void zmii_detach(struct platform_device *ofdev, int input);
-extern void zmii_get_mdio(struct platform_device *ofdev, int input);
-extern void zmii_put_mdio(struct platform_device *ofdev, int input);
-extern void zmii_set_speed(struct platform_device *ofdev, int input, int speed);
-extern int zmii_get_regs_len(struct platform_device *ocpdev);
-extern void *zmii_dump_regs(struct platform_device *ofdev, void *buf);
+int zmii_init(void);
+void zmii_exit(void);
+int zmii_attach(struct platform_device *ofdev, int input, int *mode);
+void zmii_detach(struct platform_device *ofdev, int input);
+void zmii_get_mdio(struct platform_device *ofdev, int input);
+void zmii_put_mdio(struct platform_device *ofdev, int input);
+void zmii_set_speed(struct platform_device *ofdev, int input, int speed);
+int zmii_get_regs_len(struct platform_device *ocpdev);
+void *zmii_dump_regs(struct platform_device *ofdev, void *buf);
 
 #else
 # define zmii_init()           0
index 5d41aee69d1646e3b42a1d91b8873825344043cc..952d795230a479c79c0684ee0849a05e5e0ff631 100644 (file)
@@ -1185,7 +1185,7 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
                netdev_for_each_mc_addr(ha, netdev) {
                        /* add the multicast address to the filter table */
                        unsigned long mcast_addr = 0;
-                       memcpy(((char *)&mcast_addr)+2, ha->addr, 6);
+                       memcpy(((char *)&mcast_addr)+2, ha->addr, ETH_ALEN);
                        lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
                                                   IbmVethMcastAddFilter,
                                                   mcast_addr);
@@ -1370,7 +1370,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
        netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
 
        adapter->mac_addr = 0;
-       memcpy(&adapter->mac_addr, mac_addr_p, 6);
+       memcpy(&adapter->mac_addr, mac_addr_p, ETH_ALEN);
 
        netdev->irq = dev->irq;
        netdev->netdev_ops = &ibmveth_netdev_ops;
index 26d9cd59ec75a25451185a8cb933c318f9068912..58c147271a362e68914d55d97b71b327b194e86b 100644 (file)
@@ -325,7 +325,7 @@ enum e1000_state_t {
 #undef pr_fmt
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
-extern struct net_device *e1000_get_hw_dev(struct e1000_hw *hw);
+struct net_device *e1000_get_hw_dev(struct e1000_hw *hw);
 #define e_dbg(format, arg...) \
        netdev_dbg(e1000_get_hw_dev(hw), format, ## arg)
 #define e_err(msglvl, format, arg...) \
@@ -346,20 +346,20 @@ extern struct net_device *e1000_get_hw_dev(struct e1000_hw *hw);
 extern char e1000_driver_name[];
 extern const char e1000_driver_version[];
 
-extern int e1000_up(struct e1000_adapter *adapter);
-extern void e1000_down(struct e1000_adapter *adapter);
-extern void e1000_reinit_locked(struct e1000_adapter *adapter);
-extern void e1000_reset(struct e1000_adapter *adapter);
-extern int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx);
-extern int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
-extern int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
-extern void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
-extern void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
-extern void e1000_update_stats(struct e1000_adapter *adapter);
-extern bool e1000_has_link(struct e1000_adapter *adapter);
-extern void e1000_power_up_phy(struct e1000_adapter *);
-extern void e1000_set_ethtool_ops(struct net_device *netdev);
-extern void e1000_check_options(struct e1000_adapter *adapter);
-extern char *e1000_get_hw_dev_name(struct e1000_hw *hw);
+int e1000_up(struct e1000_adapter *adapter);
+void e1000_down(struct e1000_adapter *adapter);
+void e1000_reinit_locked(struct e1000_adapter *adapter);
+void e1000_reset(struct e1000_adapter *adapter);
+int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx);
+int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
+int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
+void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
+void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
+void e1000_update_stats(struct e1000_adapter *adapter);
+bool e1000_has_link(struct e1000_adapter *adapter);
+void e1000_power_up_phy(struct e1000_adapter *);
+void e1000_set_ethtool_ops(struct net_device *netdev);
+void e1000_check_options(struct e1000_adapter *adapter);
+char *e1000_get_hw_dev_name(struct e1000_hw *hw);
 
 #endif /* _E1000_H_ */
index ad0edd11015d7b40a14d79f06afdab8ecec76cef..0150f7fc893d4ae6985096f17e5a4678f39fc3b3 100644 (file)
@@ -472,26 +472,25 @@ enum latency_range {
 extern char e1000e_driver_name[];
 extern const char e1000e_driver_version[];
 
-extern void e1000e_check_options(struct e1000_adapter *adapter);
-extern void e1000e_set_ethtool_ops(struct net_device *netdev);
-
-extern int e1000e_up(struct e1000_adapter *adapter);
-extern void e1000e_down(struct e1000_adapter *adapter);
-extern void e1000e_reinit_locked(struct e1000_adapter *adapter);
-extern void e1000e_reset(struct e1000_adapter *adapter);
-extern void e1000e_power_up_phy(struct e1000_adapter *adapter);
-extern int e1000e_setup_rx_resources(struct e1000_ring *ring);
-extern int e1000e_setup_tx_resources(struct e1000_ring *ring);
-extern void e1000e_free_rx_resources(struct e1000_ring *ring);
-extern void e1000e_free_tx_resources(struct e1000_ring *ring);
-extern struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
-                                                   struct rtnl_link_stats64
-                                                   *stats);
-extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
-extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
-extern void e1000e_get_hw_control(struct e1000_adapter *adapter);
-extern void e1000e_release_hw_control(struct e1000_adapter *adapter);
-extern void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr);
+void e1000e_check_options(struct e1000_adapter *adapter);
+void e1000e_set_ethtool_ops(struct net_device *netdev);
+
+int e1000e_up(struct e1000_adapter *adapter);
+void e1000e_down(struct e1000_adapter *adapter);
+void e1000e_reinit_locked(struct e1000_adapter *adapter);
+void e1000e_reset(struct e1000_adapter *adapter);
+void e1000e_power_up_phy(struct e1000_adapter *adapter);
+int e1000e_setup_rx_resources(struct e1000_ring *ring);
+int e1000e_setup_tx_resources(struct e1000_ring *ring);
+void e1000e_free_rx_resources(struct e1000_ring *ring);
+void e1000e_free_tx_resources(struct e1000_ring *ring);
+struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
+                                            struct rtnl_link_stats64 *stats);
+void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
+void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
+void e1000e_get_hw_control(struct e1000_adapter *adapter);
+void e1000e_release_hw_control(struct e1000_adapter *adapter);
+void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr);
 
 extern unsigned int copybreak;
 
@@ -508,8 +507,8 @@ extern const struct e1000_info e1000_pch2_info;
 extern const struct e1000_info e1000_pch_lpt_info;
 extern const struct e1000_info e1000_es2_info;
 
-extern void e1000e_ptp_init(struct e1000_adapter *adapter);
-extern void e1000e_ptp_remove(struct e1000_adapter *adapter);
+void e1000e_ptp_init(struct e1000_adapter *adapter);
+void e1000e_ptp_remove(struct e1000_adapter *adapter);
 
 static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw)
 {
@@ -536,7 +535,7 @@ static inline s32 e1e_wphy_locked(struct e1000_hw *hw, u32 offset, u16 data)
        return hw->phy.ops.write_reg_locked(hw, offset, data);
 }
 
-extern void e1000e_reload_nvm_generic(struct e1000_hw *hw);
+void e1000e_reload_nvm_generic(struct e1000_hw *hw);
 
 static inline s32 e1000e_read_mac_addr(struct e1000_hw *hw)
 {
index 0c524fa9f8111a092c07d69a442c75dea43f0f42..cfef7fc32cdd4643382f6d0125589fd1e3de6a95 100644 (file)
@@ -701,8 +701,7 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
 
        details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
        if (cmd_details) {
-               memcpy(details, cmd_details,
-                      sizeof(struct i40e_asq_cmd_details));
+               *details = *cmd_details;
 
                /* If the cmd_details are defined copy the cookie.  The
                 * cpu_to_le32 is not needed here because the data is ignored
@@ -760,7 +759,7 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
        desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);
 
        /* if the desc is available copy the temp desc to the right place */
-       memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc));
+       *desc_on_ring = *desc;
 
        /* if buff is not NULL assume indirect command */
        if (buff != NULL) {
@@ -807,7 +806,7 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
 
        /* if ready, copy the desc back to temp */
        if (i40e_asq_done(hw)) {
-               memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc));
+               *desc = *desc_on_ring;
                if (buff != NULL)
                        memcpy(buff, dma_buff->va, buff_size);
                retval = le16_to_cpu(desc->retval);
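The i40e hunks above replace fixed-size memcpy() calls with plain struct assignment. For plain-old-data structs the two produce the same bytes, but the assignment lets the compiler check the types and derive the size, so a mismatched sizeof cannot slip in. A tiny standalone illustration; struct demo_desc is an invented stand-in for i40e_aq_desc:

#include <stdio.h>
#include <string.h>

struct demo_desc {
	unsigned short opcode;
	unsigned short retval;
	unsigned int   param0;
};

int main(void)
{
	struct demo_desc on_ring = { .opcode = 0x0701, .retval = 0, .param0 = 42 };
	struct demo_desc copy_a, copy_b;

	memcpy(&copy_a, &on_ring, sizeof(struct demo_desc));	/* old style */
	copy_b = on_ring;			/* new style: same result, type-checked */

	printf("copies match: %d\n",
	       memcmp(&copy_a, &copy_b, sizeof(copy_a)) == 0);
	return 0;
}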
index c21df7bc3b1dd9b348050c1593866b33e42c7cf6..1e4ea134975ac43e8288132e6e4b93bacb2f4b7e 100644 (file)
@@ -507,7 +507,7 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
 
        /* save link status information */
        if (link)
-               memcpy(link, hw_link_info, sizeof(struct i40e_link_status));
+               *link = *hw_link_info;
 
        /* flag cleared so helper functions don't call AQ again */
        hw->phy.get_link_info = false;
index 601d482694ea8486273371a6cb6d4b0a968cd33b..221aa4795017649ccdd57accb01520c4bccb42e8 100644 (file)
@@ -101,10 +101,10 @@ int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
        mem->size = ALIGN(size, alignment);
        mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
                                      &mem->pa, GFP_KERNEL);
-       if (mem->va)
-               return 0;
+       if (!mem->va)
+               return -ENOMEM;
 
-       return -ENOMEM;
+       return 0;
 }
 
 /**
@@ -136,10 +136,10 @@ int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
        mem->size = size;
        mem->va = kzalloc(size, GFP_KERNEL);
 
-       if (mem->va)
-               return 0;
+       if (!mem->va)
+               return -ENOMEM;
 
-       return -ENOMEM;
+       return 0;
 }
 
 /**
@@ -174,8 +174,7 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
                         u16 needed, u16 id)
 {
        int ret = -ENOMEM;
-       int i = 0;
-       int j = 0;
+       int i, j;
 
        if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
                dev_info(&pf->pdev->dev,
@@ -186,7 +185,7 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
 
        /* start the linear search with an imperfect hint */
        i = pile->search_hint;
-       while (i < pile->num_entries && ret < 0) {
+       while (i < pile->num_entries) {
                /* skip already allocated entries */
                if (pile->list[i] & I40E_PILE_VALID_BIT) {
                        i++;
@@ -205,6 +204,7 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
                                pile->list[i+j] = id | I40E_PILE_VALID_BIT;
                        ret = i;
                        pile->search_hint = i + j;
+                       break;
                } else {
                        /* not enough, so skip over it and continue looking */
                        i += j;
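
The i40e_get_lump() rework above keeps the same first-fit search but exits with an explicit break once the run of 'needed' free entries has been claimed, instead of relying on 'ret < 0' in the while condition. For readers unfamiliar with the allocator, a simplified first-fit-with-hint sketch follows; the flat 'used' array is only a stand-in for the driver's pile/valid-bit layout:

static int get_lump(unsigned char *used, int num_entries, int needed, int *hint)
{
        int i = *hint;

        while (i + needed <= num_entries) {
                int j;

                /* how many consecutive free slots start at i? */
                for (j = 0; j < needed; j++)
                        if (used[i + j])
                                break;

                if (j == needed) {              /* long enough: claim it */
                        for (j = 0; j < needed; j++)
                                used[i + j] = 1;
                        *hint = i + needed;
                        return i;
                }
                i += j + 1;                     /* skip past the busy slot */
        }
        return -1;                              /* no run long enough */
}
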
@@ -1388,7 +1388,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
        bool add_happened = false;
        int filter_list_len = 0;
        u32 changed_flags = 0;
-       i40e_status ret = 0;
+       i40e_status aq_ret = 0;
        struct i40e_pf *pf;
        int num_add = 0;
        int num_del = 0;
@@ -1449,28 +1449,28 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
 
                        /* flush a full buffer */
                        if (num_del == filter_list_len) {
-                               ret = i40e_aq_remove_macvlan(&pf->hw,
+                               aq_ret = i40e_aq_remove_macvlan(&pf->hw,
                                            vsi->seid, del_list, num_del,
                                            NULL);
                                num_del = 0;
                                memset(del_list, 0, sizeof(*del_list));
 
-                               if (ret)
+                               if (aq_ret)
                                        dev_info(&pf->pdev->dev,
                                                 "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n",
-                                                ret,
+                                                aq_ret,
                                                 pf->hw.aq.asq_last_status);
                        }
                }
                if (num_del) {
-                       ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
+                       aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
                                                     del_list, num_del, NULL);
                        num_del = 0;
 
-                       if (ret)
+                       if (aq_ret)
                                dev_info(&pf->pdev->dev,
                                         "ignoring delete macvlan error, err %d, aq_err %d\n",
-                                        ret, pf->hw.aq.asq_last_status);
+                                        aq_ret, pf->hw.aq.asq_last_status);
                }
 
                kfree(del_list);
@@ -1515,32 +1515,30 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
 
                        /* flush a full buffer */
                        if (num_add == filter_list_len) {
-                               ret = i40e_aq_add_macvlan(&pf->hw,
-                                                         vsi->seid,
-                                                         add_list,
-                                                         num_add,
-                                                         NULL);
+                               aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
+                                                            add_list, num_add,
+                                                            NULL);
                                num_add = 0;
 
-                               if (ret)
+                               if (aq_ret)
                                        break;
                                memset(add_list, 0, sizeof(*add_list));
                        }
                }
                if (num_add) {
-                       ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
-                                                 add_list, num_add, NULL);
+                       aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
+                                                    add_list, num_add, NULL);
                        num_add = 0;
                }
                kfree(add_list);
                add_list = NULL;
 
-               if (add_happened && (!ret)) {
+               if (add_happened && (!aq_ret)) {
                        /* do nothing */;
-               } else if (add_happened && (ret)) {
+               } else if (add_happened && (aq_ret)) {
                        dev_info(&pf->pdev->dev,
                                 "add filter failed, err %d, aq_err %d\n",
-                                ret, pf->hw.aq.asq_last_status);
+                                aq_ret, pf->hw.aq.asq_last_status);
                        if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
                            !test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
                                      &vsi->state)) {
@@ -1556,28 +1554,27 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
        if (changed_flags & IFF_ALLMULTI) {
                bool cur_multipromisc;
                cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
-               ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
-                                                           vsi->seid,
-                                                           cur_multipromisc,
-                                                           NULL);
-               if (ret)
+               aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
+                                                              vsi->seid,
+                                                              cur_multipromisc,
+                                                              NULL);
+               if (aq_ret)
                        dev_info(&pf->pdev->dev,
                                 "set multi promisc failed, err %d, aq_err %d\n",
-                                ret, pf->hw.aq.asq_last_status);
+                                aq_ret, pf->hw.aq.asq_last_status);
        }
        if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
                bool cur_promisc;
                cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
                               test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
                                        &vsi->state));
-               ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw,
-                                                         vsi->seid,
-                                                         cur_promisc,
-                                                         NULL);
-               if (ret)
+               aq_ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw,
+                                                            vsi->seid,
+                                                            cur_promisc, NULL);
+               if (aq_ret)
                        dev_info(&pf->pdev->dev,
                                 "set uni promisc failed, err %d, aq_err %d\n",
-                                ret, pf->hw.aq.asq_last_status);
+                                aq_ret, pf->hw.aq.asq_last_status);
        }
 
        clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
@@ -1790,6 +1787,8 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
  * i40e_vsi_kill_vlan - Remove vsi membership for given vlan
  * @vsi: the vsi being configured
  * @vid: vlan id to be removed (0 = untagged only , -1 = any)
+ *
+ * Return: 0 on success or a negative error code otherwise
  **/
 int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
 {
@@ -1863,37 +1862,39 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
  * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
  * @netdev: network interface to be adjusted
  * @vid: vlan id to be added
+ *
+ * net_device_ops implementation for adding vlan ids
  **/
 static int i40e_vlan_rx_add_vid(struct net_device *netdev,
                                __always_unused __be16 proto, u16 vid)
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
-       int ret;
+       int ret = 0;
 
        if (vid > 4095)
-               return 0;
+               return -EINVAL;
+
+       netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid);
 
-       netdev_info(vsi->netdev, "adding %pM vid=%d\n",
-                   netdev->dev_addr, vid);
        /* If the network stack called us with vid = 0, we should
         * indicate to i40e_vsi_add_vlan() that we want to receive
         * any traffic (i.e. with any vlan tag, or untagged)
         */
        ret = i40e_vsi_add_vlan(vsi, vid ? vid : I40E_VLAN_ANY);
 
-       if (!ret) {
-               if (vid < VLAN_N_VID)
-                       set_bit(vid, vsi->active_vlans);
-       }
+       if (!ret && (vid < VLAN_N_VID))
+               set_bit(vid, vsi->active_vlans);
 
-       return 0;
+       return ret;
 }
 
 /**
  * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
  * @netdev: network interface to be adjusted
  * @vid: vlan id to be removed
+ *
+ * net_device_ops implementation for removing vlan ids
  **/
 static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
                                 __always_unused __be16 proto, u16 vid)
@@ -1901,15 +1902,16 @@ static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
 
-       netdev_info(vsi->netdev, "removing %pM vid=%d\n",
-                   netdev->dev_addr, vid);
+       netdev_info(netdev, "removing %pM vid=%d\n", netdev->dev_addr, vid);
+
        /* return code is ignored as there is nothing a user
         * can do about failure to remove and a log message was
-        * already printed from another function
+        * already printed from the other function
         */
        i40e_vsi_kill_vlan(vsi, vid);
 
        clear_bit(vid, vsi->active_vlans);
+
        return 0;
 }
 
@@ -1936,10 +1938,10 @@ static void i40e_restore_vlan(struct i40e_vsi *vsi)
  * @vsi: the vsi being adjusted
  * @vid: the vlan id to set as a PVID
  **/
-i40e_status i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
+int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
 {
        struct i40e_vsi_context ctxt;
-       i40e_status ret;
+       i40e_status aq_ret;
 
        vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
        vsi->info.pvid = cpu_to_le16(vid);
@@ -1948,14 +1950,15 @@ i40e_status i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
 
        ctxt.seid = vsi->seid;
        memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
-       ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
-       if (ret) {
+       aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+       if (aq_ret) {
                dev_info(&vsi->back->pdev->dev,
                         "%s: update vsi failed, aq_err=%d\n",
                         __func__, vsi->back->hw.aq.asq_last_status);
+               return -ENOENT;
        }
 
-       return ret;
+       return 0;
 }
 
 /**
@@ -3326,7 +3329,8 @@ static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
  **/
 static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
 {
-       int num_tc = 0, i;
+       u8 num_tc = 0;
+       int i;
 
        /* Scan the ETS Config Priority Table to find
         * traffic class enabled for a given priority
@@ -3341,9 +3345,7 @@ static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
        /* Traffic class index starts from zero so
         * increment to return the actual count
         */
-       num_tc++;
-
-       return num_tc;
+       return num_tc + 1;
 }
 
 /**
@@ -3451,28 +3453,27 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
        struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
        struct i40e_pf *pf = vsi->back;
        struct i40e_hw *hw = &pf->hw;
+       i40e_status aq_ret;
        u32 tc_bw_max;
-       int ret;
        int i;
 
        /* Get the VSI level BW configuration */
-       ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
-       if (ret) {
+       aq_ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
+       if (aq_ret) {
                dev_info(&pf->pdev->dev,
                         "couldn't get pf vsi bw config, err %d, aq_err %d\n",
-                        ret, pf->hw.aq.asq_last_status);
-               return ret;
+                        aq_ret, pf->hw.aq.asq_last_status);
+               return -EINVAL;
        }
 
        /* Get the VSI level BW configuration per TC */
-       ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
-                                              &bw_ets_config,
-                                              NULL);
-       if (ret) {
+       aq_ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
+                                                 NULL);
+       if (aq_ret) {
                dev_info(&pf->pdev->dev,
                         "couldn't get pf vsi ets bw config, err %d, aq_err %d\n",
-                        ret, pf->hw.aq.asq_last_status);
-               return ret;
+                        aq_ret, pf->hw.aq.asq_last_status);
+               return -EINVAL;
        }
 
        if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
@@ -3494,7 +3495,8 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
                /* 3 bits out of 4 for each TC */
                vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
        }
-       return ret;
+
+       return 0;
 }
 
 /**
@@ -3505,30 +3507,30 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
  *
  * Returns 0 on success, negative value on failure
  **/
-static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi,
-                                      u8 enabled_tc,
+static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
                                       u8 *bw_share)
 {
        struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
-       int i, ret = 0;
+       i40e_status aq_ret;
+       int i;
 
        bw_data.tc_valid_bits = enabled_tc;
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
                bw_data.tc_bw_credits[i] = bw_share[i];
 
-       ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid,
-                                      &bw_data, NULL);
-       if (ret) {
+       aq_ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
+                                         NULL);
+       if (aq_ret) {
                dev_info(&vsi->back->pdev->dev,
                         "%s: AQ command Config VSI BW allocation per TC failed = %d\n",
                         __func__, vsi->back->hw.aq.asq_last_status);
-               return ret;
+               return -EINVAL;
        }
 
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
                vsi->info.qs_handle[i] = bw_data.qs_handles[i];
 
-       return ret;
+       return 0;
 }
 
 /**
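
Throughout the i40e hunks above the local 'ret' becomes 'aq_ret', and the functions stop leaking the firmware admin-queue status to their callers: i40e_vsi_add_pvid() now returns -ENOENT on failure, the BW query/config helpers return -EINVAL, and the raw status is still logged via asq_last_status. A kernel-style sketch of the pattern, using a hypothetical hw type and AQ wrapper and assuming the usual kernel headers:

struct example_hw {
        int last_aq_status;
};

static int example_aq_query_config(struct example_hw *hw)
{
        return 0;       /* pretend the firmware call succeeded */
}

static int example_get_config(struct example_hw *hw)
{
        int aq_ret = example_aq_query_config(hw);       /* firmware status */

        if (aq_ret) {
                pr_info("query failed, aq status %d\n", hw->last_aq_status);
                return -EINVAL;         /* callers only ever see an errno */
        }
        return 0;
}
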
index 74a1506b42359d02305ea9d78cda7e536633559f..8c2437722aad2b32fec48bfc3f17bdf71db5c164 100644 (file)
 #ifndef _E1000_82575_H_
 #define _E1000_82575_H_
 
-extern void igb_shutdown_serdes_link_82575(struct e1000_hw *hw);
-extern void igb_power_up_serdes_link_82575(struct e1000_hw *hw);
-extern void igb_power_down_phy_copper_82575(struct e1000_hw *hw);
-extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
-extern s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
-                               u8 dev_addr, u8 *data);
-extern s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
-                                u8 dev_addr, u8 data);
+void igb_shutdown_serdes_link_82575(struct e1000_hw *hw);
+void igb_power_up_serdes_link_82575(struct e1000_hw *hw);
+void igb_power_down_phy_copper_82575(struct e1000_hw *hw);
+void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
+s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr,
+                     u8 *data);
+s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr,
+                      u8 data);
 
 #define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \
                                      (ID_LED_DEF1_DEF2 <<  8) | \
index 37a9c06a6c6816bf3f87c82986851504d79d7b71..2e166b22d52b6374e3814a9526d6cad9decfa5a3 100644 (file)
@@ -562,11 +562,11 @@ struct e1000_hw {
        u8  revision_id;
 };
 
-extern struct net_device *igb_get_hw_dev(struct e1000_hw *hw);
+struct net_device *igb_get_hw_dev(struct e1000_hw *hw);
 #define hw_dbg(format, arg...) \
        netdev_dbg(igb_get_hw_dev(hw), format, ##arg)
 
 /* These functions must be implemented by drivers */
-s32  igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
-s32  igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
+s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
+s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
 #endif /* _E1000_HW_H_ */
index dde3c4b7ea9971db46981bc319faeb5fe56e452c..2d913716573a29a830610de841164e30e505cdfb 100644 (file)
 #ifndef _E1000_I210_H_
 #define _E1000_I210_H_
 
-extern s32 igb_update_flash_i210(struct e1000_hw *hw);
-extern s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw);
-extern s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw);
-extern s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset,
-                             u16 words, u16 *data);
-extern s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset,
-                            u16 words, u16 *data);
-extern s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
-extern void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
-extern s32 igb_acquire_nvm_i210(struct e1000_hw *hw);
-extern void igb_release_nvm_i210(struct e1000_hw *hw);
-extern s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data);
-extern s32 igb_read_invm_version(struct e1000_hw *hw,
-                                struct e1000_fw_version *invm_ver);
-extern s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
-                             u16 *data);
-extern s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
-                              u16 data);
-extern s32 igb_init_nvm_params_i210(struct e1000_hw *hw);
-extern bool igb_get_flash_presence_i210(struct e1000_hw *hw);
+s32 igb_update_flash_i210(struct e1000_hw *hw);
+s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw);
+s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw);
+s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
+                           u16 *data);
+s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
+                          u16 *data);
+s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
+void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
+s32 igb_acquire_nvm_i210(struct e1000_hw *hw);
+void igb_release_nvm_i210(struct e1000_hw *hw);
+s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data);
+s32 igb_read_invm_version(struct e1000_hw *hw,
+                         struct e1000_fw_version *invm_ver);
+s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data);
+s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data);
+s32 igb_init_nvm_params_i210(struct e1000_hw *hw);
+bool igb_get_flash_presence_i210(struct e1000_hw *hw);
 
 #define E1000_STM_OPCODE               0xDB00
 #define E1000_EEPROM_FLASH_SIZE_WORD   0x11
index 5e13e83cc608358273d3e7dadb6bf488e426127b..e4cbe8ef67b3094b32b73cbccda9be5673243d1f 100644 (file)
@@ -86,6 +86,6 @@ enum e1000_mng_mode {
 
 #define E1000_MNG_DHCP_COOKIE_STATUS_VLAN      0x2
 
-extern void e1000_init_function_pointers_82575(struct e1000_hw *hw);
+void e1000_init_function_pointers_82575(struct e1000_hw *hw);
 
 #endif
index 6807b098edaee27c0343f3903a080c08604ea3d5..5e9ed89403aa45adb9238330e4937e8aa1f4cd09 100644 (file)
@@ -483,40 +483,38 @@ enum igb_boards {
 extern char igb_driver_name[];
 extern char igb_driver_version[];
 
-extern int igb_up(struct igb_adapter *);
-extern void igb_down(struct igb_adapter *);
-extern void igb_reinit_locked(struct igb_adapter *);
-extern void igb_reset(struct igb_adapter *);
-extern void igb_write_rss_indir_tbl(struct igb_adapter *);
-extern int igb_set_spd_dplx(struct igb_adapter *, u32, u8);
-extern int igb_setup_tx_resources(struct igb_ring *);
-extern int igb_setup_rx_resources(struct igb_ring *);
-extern void igb_free_tx_resources(struct igb_ring *);
-extern void igb_free_rx_resources(struct igb_ring *);
-extern void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
-extern void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
-extern void igb_setup_tctl(struct igb_adapter *);
-extern void igb_setup_rctl(struct igb_adapter *);
-extern netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *);
-extern void igb_unmap_and_free_tx_resource(struct igb_ring *,
-                                          struct igb_tx_buffer *);
-extern void igb_alloc_rx_buffers(struct igb_ring *, u16);
-extern void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *);
-extern bool igb_has_link(struct igb_adapter *adapter);
-extern void igb_set_ethtool_ops(struct net_device *);
-extern void igb_power_up_link(struct igb_adapter *);
-extern void igb_set_fw_version(struct igb_adapter *);
-extern void igb_ptp_init(struct igb_adapter *adapter);
-extern void igb_ptp_stop(struct igb_adapter *adapter);
-extern void igb_ptp_reset(struct igb_adapter *adapter);
-extern void igb_ptp_tx_work(struct work_struct *work);
-extern void igb_ptp_rx_hang(struct igb_adapter *adapter);
-extern void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
-extern void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
-                               struct sk_buff *skb);
-extern void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
-                               unsigned char *va,
-                               struct sk_buff *skb);
+int igb_up(struct igb_adapter *);
+void igb_down(struct igb_adapter *);
+void igb_reinit_locked(struct igb_adapter *);
+void igb_reset(struct igb_adapter *);
+int igb_reinit_queues(struct igb_adapter *);
+void igb_write_rss_indir_tbl(struct igb_adapter *);
+int igb_set_spd_dplx(struct igb_adapter *, u32, u8);
+int igb_setup_tx_resources(struct igb_ring *);
+int igb_setup_rx_resources(struct igb_ring *);
+void igb_free_tx_resources(struct igb_ring *);
+void igb_free_rx_resources(struct igb_ring *);
+void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
+void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
+void igb_setup_tctl(struct igb_adapter *);
+void igb_setup_rctl(struct igb_adapter *);
+netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *);
+void igb_unmap_and_free_tx_resource(struct igb_ring *, struct igb_tx_buffer *);
+void igb_alloc_rx_buffers(struct igb_ring *, u16);
+void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *);
+bool igb_has_link(struct igb_adapter *adapter);
+void igb_set_ethtool_ops(struct net_device *);
+void igb_power_up_link(struct igb_adapter *);
+void igb_set_fw_version(struct igb_adapter *);
+void igb_ptp_init(struct igb_adapter *adapter);
+void igb_ptp_stop(struct igb_adapter *adapter);
+void igb_ptp_reset(struct igb_adapter *adapter);
+void igb_ptp_tx_work(struct work_struct *work);
+void igb_ptp_rx_hang(struct igb_adapter *adapter);
+void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
+void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb);
+void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va,
+                        struct sk_buff *skb);
 static inline void igb_ptp_rx_hwtstamp(struct igb_ring *rx_ring,
                                       union e1000_adv_rx_desc *rx_desc,
                                       struct sk_buff *skb)
@@ -531,11 +529,11 @@ static inline void igb_ptp_rx_hwtstamp(struct igb_ring *rx_ring,
        rx_ring->last_rx_timestamp = jiffies;
 }
 
-extern int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
-                                 struct ifreq *ifr, int cmd);
+int igb_ptp_hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr,
+                          int cmd);
 #ifdef CONFIG_IGB_HWMON
-extern void igb_sysfs_exit(struct igb_adapter *adapter);
-extern int igb_sysfs_init(struct igb_adapter *adapter);
+void igb_sysfs_exit(struct igb_adapter *adapter);
+int igb_sysfs_init(struct igb_adapter *adapter);
 #endif
 static inline s32 igb_reset_phy(struct e1000_hw *hw)
 {
index 48cbc833b051b3861a4568e72becf67eeaa357e4..ebdac027350197834e799af74ed66d94c3dcbaff 100644 (file)
@@ -1607,6 +1607,9 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
                        igb_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0);
                        igb_write_phy_reg(hw, PHY_CONTROL, 0x4140);
                }
+       } else if (hw->phy.type == e1000_phy_82580) {
+               /* enable MII loopback */
+               igb_write_phy_reg(hw, I82580_PHY_LBK_CTRL, 0x8041);
        }
 
        /* add small delay to avoid loopback test failure */
@@ -1656,7 +1659,8 @@ static int igb_setup_loopback_test(struct igb_adapter *adapter)
                if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) ||
                (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
                (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
-               (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) {
+               (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) ||
+               (hw->device_id == E1000_DEV_ID_I354_SGMII)) {
 
                        /* Enable DH89xxCC MPHY for near end loopback */
                        reg = rd32(E1000_MPHY_ADDR_CTL);
@@ -1722,7 +1726,8 @@ static void igb_loopback_cleanup(struct igb_adapter *adapter)
        if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) ||
        (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
        (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
-       (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) {
+       (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) ||
+       (hw->device_id == E1000_DEV_ID_I354_SGMII)) {
                u32 reg;
 
                /* Disable near end loopback on DH89xxCC */
@@ -2872,6 +2877,88 @@ static int igb_set_rxfh_indir(struct net_device *netdev, const u32 *indir)
        return 0;
 }
 
+static unsigned int igb_max_channels(struct igb_adapter *adapter)
+{
+       struct e1000_hw *hw = &adapter->hw;
+       unsigned int max_combined = 0;
+
+       switch (hw->mac.type) {
+       case e1000_i211:
+               max_combined = IGB_MAX_RX_QUEUES_I211;
+               break;
+       case e1000_82575:
+       case e1000_i210:
+               max_combined = IGB_MAX_RX_QUEUES_82575;
+               break;
+       case e1000_i350:
+               if (!!adapter->vfs_allocated_count) {
+                       max_combined = 1;
+                       break;
+               }
+               /* fall through */
+       case e1000_82576:
+               if (!!adapter->vfs_allocated_count) {
+                       max_combined = 2;
+                       break;
+               }
+               /* fall through */
+       case e1000_82580:
+       case e1000_i354:
+       default:
+               max_combined = IGB_MAX_RX_QUEUES;
+               break;
+       }
+
+       return max_combined;
+}
+
+static void igb_get_channels(struct net_device *netdev,
+                            struct ethtool_channels *ch)
+{
+       struct igb_adapter *adapter = netdev_priv(netdev);
+
+       /* Report maximum channels */
+       ch->max_combined = igb_max_channels(adapter);
+
+       /* Report info for other vector */
+       if (adapter->msix_entries) {
+               ch->max_other = NON_Q_VECTORS;
+               ch->other_count = NON_Q_VECTORS;
+       }
+
+       ch->combined_count = adapter->rss_queues;
+}
+
+static int igb_set_channels(struct net_device *netdev,
+                           struct ethtool_channels *ch)
+{
+       struct igb_adapter *adapter = netdev_priv(netdev);
+       unsigned int count = ch->combined_count;
+
+       /* Verify they are not requesting separate vectors */
+       if (!count || ch->rx_count || ch->tx_count)
+               return -EINVAL;
+
+       /* Verify other_count is valid and has not been changed */
+       if (ch->other_count != NON_Q_VECTORS)
+               return -EINVAL;
+
+       /* Verify the number of channels doesn't exceed hw limits */
+       if (count > igb_max_channels(adapter))
+               return -EINVAL;
+
+       if (count != adapter->rss_queues) {
+               adapter->rss_queues = count;
+
+               /* Hardware has to reinitialize queues and interrupts to
+                * match the new configuration.
+                */
+               return igb_reinit_queues(adapter);
+       }
+
+       return 0;
+}
+
 static const struct ethtool_ops igb_ethtool_ops = {
        .get_settings           = igb_get_settings,
        .set_settings           = igb_set_settings,
@@ -2908,6 +2995,8 @@ static const struct ethtool_ops igb_ethtool_ops = {
        .get_rxfh_indir_size    = igb_get_rxfh_indir_size,
        .get_rxfh_indir         = igb_get_rxfh_indir,
        .set_rxfh_indir         = igb_set_rxfh_indir,
+       .get_channels           = igb_get_channels,
+       .set_channels           = igb_set_channels,
        .begin                  = igb_ethtool_begin,
        .complete               = igb_ethtool_complete,
 };
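
The two new ethtool callbacks expose the combined queue count to user space: 'ethtool -l <iface>' reports the maximum from igb_max_channels() alongside the current rss_queues value, and 'ethtool -L <iface> combined <n>' changes it. igb_set_channels() rejects requests for separate rx/tx vectors, a modified other_count, or a count above the per-MAC maximum, and any accepted change goes through igb_reinit_queues() (added at the end of igb_main.c below) to rebuild the interrupt scheme.
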
index 8cf44f2a8ccd5b531f42fa0dfb357a19d6efa4f6..a505d3bad09a45df8fae391194246c02bfc3525d 100644 (file)
@@ -5708,7 +5708,7 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
 
        /* reply to reset with ack and vf mac address */
        msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
-       memcpy(addr, vf_mac, 6);
+       memcpy(addr, vf_mac, ETH_ALEN);
        igb_write_mbx(hw, msgbuf, 3, vf);
 }
 
@@ -7838,4 +7838,26 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
                return E1000_SUCCESS;
 
 }
+
+int igb_reinit_queues(struct igb_adapter *adapter)
+{
+       struct net_device *netdev = adapter->netdev;
+       struct pci_dev *pdev = adapter->pdev;
+       int err = 0;
+
+       if (netif_running(netdev))
+               igb_close(netdev);
+
+       igb_clear_interrupt_scheme(adapter);
+
+       if (igb_init_interrupt_scheme(adapter, true)) {
+               dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+               return -ENOMEM;
+       }
+
+       if (netif_running(netdev))
+               err = igb_open(netdev);
+
+       return err;
+}
 /* igb_main.c */
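
igb_reinit_queues() follows the usual resize sequence: close the netdev if it is running, tear down and rebuild the interrupt/queue scheme, then reopen. If igb_init_interrupt_scheme() fails it returns -ENOMEM without reopening, so the interface stays down until a later configuration attempt succeeds.
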
index a1463e3d14c0522511c75c424ffa16bd3162411a..7d6a25c8f889efd0eb69ef749dd0bb13f18302ad 100644 (file)
@@ -312,17 +312,17 @@ enum igbvf_state_t {
 extern char igbvf_driver_name[];
 extern const char igbvf_driver_version[];
 
-extern void igbvf_check_options(struct igbvf_adapter *);
-extern void igbvf_set_ethtool_ops(struct net_device *);
-
-extern int igbvf_up(struct igbvf_adapter *);
-extern void igbvf_down(struct igbvf_adapter *);
-extern void igbvf_reinit_locked(struct igbvf_adapter *);
-extern int igbvf_setup_rx_resources(struct igbvf_adapter *, struct igbvf_ring *);
-extern int igbvf_setup_tx_resources(struct igbvf_adapter *, struct igbvf_ring *);
-extern void igbvf_free_rx_resources(struct igbvf_ring *);
-extern void igbvf_free_tx_resources(struct igbvf_ring *);
-extern void igbvf_update_stats(struct igbvf_adapter *);
+void igbvf_check_options(struct igbvf_adapter *);
+void igbvf_set_ethtool_ops(struct net_device *);
+
+int igbvf_up(struct igbvf_adapter *);
+void igbvf_down(struct igbvf_adapter *);
+void igbvf_reinit_locked(struct igbvf_adapter *);
+int igbvf_setup_rx_resources(struct igbvf_adapter *, struct igbvf_ring *);
+int igbvf_setup_tx_resources(struct igbvf_adapter *, struct igbvf_ring *);
+void igbvf_free_rx_resources(struct igbvf_ring *);
+void igbvf_free_tx_resources(struct igbvf_ring *);
+void igbvf_update_stats(struct igbvf_adapter *);
 
 extern unsigned int copybreak;
 
index eea0e10ce12f95d47ad884d48170495913f8db70..955ad8c2c53456a2bb9ce28515a00d52264ed3ee 100644 (file)
@@ -154,7 +154,7 @@ static s32 e1000_reset_hw_vf(struct e1000_hw *hw)
                ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
                if (!ret_val) {
                        if (msgbuf[0] == (E1000_VF_RESET | E1000_VT_MSGTYPE_ACK))
-                               memcpy(hw->mac.perm_addr, addr, 6);
+                               memcpy(hw->mac.perm_addr, addr, ETH_ALEN);
                        else
                                ret_val = -E1000_ERR_MAC_INIT;
                }
@@ -314,7 +314,7 @@ static void e1000_rar_set_vf(struct e1000_hw *hw, u8 * addr, u32 index)
 
        memset(msgbuf, 0, 12);
        msgbuf[0] = E1000_VF_SET_MAC_ADDR;
-       memcpy(msg_addr, addr, 6);
+       memcpy(msg_addr, addr, ETH_ALEN);
        ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
 
        if (!ret_val)
index 4d2ae97ff1b3d7380de5c2ea4653f33fda330880..2224cc2edf1396d338b9786a2dd23af8b77aa581 100644 (file)
@@ -187,21 +187,21 @@ enum ixgb_state_t {
 };
 
 /* Exported from other modules */
-extern void ixgb_check_options(struct ixgb_adapter *adapter);
-extern void ixgb_set_ethtool_ops(struct net_device *netdev);
+void ixgb_check_options(struct ixgb_adapter *adapter);
+void ixgb_set_ethtool_ops(struct net_device *netdev);
 extern char ixgb_driver_name[];
 extern const char ixgb_driver_version[];
 
-extern void ixgb_set_speed_duplex(struct net_device *netdev);
+void ixgb_set_speed_duplex(struct net_device *netdev);
 
-extern int ixgb_up(struct ixgb_adapter *adapter);
-extern void ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog);
-extern void ixgb_reset(struct ixgb_adapter *adapter);
-extern int ixgb_setup_rx_resources(struct ixgb_adapter *adapter);
-extern int ixgb_setup_tx_resources(struct ixgb_adapter *adapter);
-extern void ixgb_free_rx_resources(struct ixgb_adapter *adapter);
-extern void ixgb_free_tx_resources(struct ixgb_adapter *adapter);
-extern void ixgb_update_stats(struct ixgb_adapter *adapter);
+int ixgb_up(struct ixgb_adapter *adapter);
+void ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog);
+void ixgb_reset(struct ixgb_adapter *adapter);
+int ixgb_setup_rx_resources(struct ixgb_adapter *adapter);
+int ixgb_setup_tx_resources(struct ixgb_adapter *adapter);
+void ixgb_free_rx_resources(struct ixgb_adapter *adapter);
+void ixgb_free_tx_resources(struct ixgb_adapter *adapter);
+void ixgb_update_stats(struct ixgb_adapter *adapter);
 
 
 #endif /* _IXGB_H_ */
index 2a99a35c33aa24c30a61837ae880ea373b908c14..0bd5d72e1af5b6be4873baf31431e7f5ad09a347 100644 (file)
@@ -759,27 +759,20 @@ struct ixgb_hw_stats {
 };
 
 /* Function Prototypes */
-extern bool ixgb_adapter_stop(struct ixgb_hw *hw);
-extern bool ixgb_init_hw(struct ixgb_hw *hw);
-extern bool ixgb_adapter_start(struct ixgb_hw *hw);
-extern void ixgb_check_for_link(struct ixgb_hw *hw);
-extern bool ixgb_check_for_bad_link(struct ixgb_hw *hw);
-
-extern void ixgb_rar_set(struct ixgb_hw *hw,
-                               u8 *addr,
-                               u32 index);
+bool ixgb_adapter_stop(struct ixgb_hw *hw);
+bool ixgb_init_hw(struct ixgb_hw *hw);
+bool ixgb_adapter_start(struct ixgb_hw *hw);
+void ixgb_check_for_link(struct ixgb_hw *hw);
+bool ixgb_check_for_bad_link(struct ixgb_hw *hw);
 
+void ixgb_rar_set(struct ixgb_hw *hw, u8 *addr, u32 index);
 
 /* Filters (multicast, vlan, receive) */
-extern void ixgb_mc_addr_list_update(struct ixgb_hw *hw,
-                                  u8 *mc_addr_list,
-                                  u32 mc_addr_count,
-                                  u32 pad);
+void ixgb_mc_addr_list_update(struct ixgb_hw *hw, u8 *mc_addr_list,
+                             u32 mc_addr_count, u32 pad);
 
 /* Vfta functions */
-extern void ixgb_write_vfta(struct ixgb_hw *hw,
-                                u32 offset,
-                                u32 value);
+void ixgb_write_vfta(struct ixgb_hw *hw, u32 offset, u32 value);
 
 /* Access functions to eeprom data */
 void ixgb_get_ee_mac_addr(struct ixgb_hw *hw, u8 *mac_addr);
index 0ac6b11c6e4ec323aaa50db976e2a996b1b02d19..dc1588ee264a31da9e7561904f4ced02b0861502 100644 (file)
@@ -55,7 +55,7 @@
 #include <net/busy_poll.h>
 
 #ifdef CONFIG_NET_RX_BUSY_POLL
-#define LL_EXTENDED_STATS
+#define BP_EXTENDED_STATS
 #endif
 /* common prefix used by pr_<> macros */
 #undef pr_fmt
@@ -187,11 +187,11 @@ struct ixgbe_rx_buffer {
 struct ixgbe_queue_stats {
        u64 packets;
        u64 bytes;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
        u64 yields;
        u64 misses;
        u64 cleaned;
-#endif  /* LL_EXTENDED_STATS */
+#endif  /* BP_EXTENDED_STATS */
 };
 
 struct ixgbe_tx_queue_stats {
@@ -399,7 +399,7 @@ static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector)
                WARN_ON(q_vector->state & IXGBE_QV_STATE_NAPI);
                q_vector->state |= IXGBE_QV_STATE_NAPI_YIELD;
                rc = false;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
                q_vector->tx.ring->stats.yields++;
 #endif
        } else
@@ -432,7 +432,7 @@ static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector)
        if ((q_vector->state & IXGBE_QV_LOCKED)) {
                q_vector->state |= IXGBE_QV_STATE_POLL_YIELD;
                rc = false;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
                q_vector->rx.ring->stats.yields++;
 #endif
        } else
@@ -457,7 +457,7 @@ static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector)
 }
 
 /* true if a socket is polling, even if it did not get the lock */
-static inline bool ixgbe_qv_ll_polling(struct ixgbe_q_vector *q_vector)
+static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector)
 {
        WARN_ON(!(q_vector->state & IXGBE_QV_LOCKED));
        return q_vector->state & IXGBE_QV_USER_PEND;
@@ -487,7 +487,7 @@ static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector)
        return false;
 }
 
-static inline bool ixgbe_qv_ll_polling(struct ixgbe_q_vector *q_vector)
+static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector)
 {
        return false;
 }
@@ -786,93 +786,89 @@ extern const char ixgbe_driver_version[];
 extern char ixgbe_default_device_descr[];
 #endif /* IXGBE_FCOE */
 
-extern void ixgbe_up(struct ixgbe_adapter *adapter);
-extern void ixgbe_down(struct ixgbe_adapter *adapter);
-extern void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
-extern void ixgbe_reset(struct ixgbe_adapter *adapter);
-extern void ixgbe_set_ethtool_ops(struct net_device *netdev);
-extern int ixgbe_setup_rx_resources(struct ixgbe_ring *);
-extern int ixgbe_setup_tx_resources(struct ixgbe_ring *);
-extern void ixgbe_free_rx_resources(struct ixgbe_ring *);
-extern void ixgbe_free_tx_resources(struct ixgbe_ring *);
-extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
-extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
-extern void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
-                                  struct ixgbe_ring *);
-extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
-extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
-extern int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
+void ixgbe_up(struct ixgbe_adapter *adapter);
+void ixgbe_down(struct ixgbe_adapter *adapter);
+void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
+void ixgbe_reset(struct ixgbe_adapter *adapter);
+void ixgbe_set_ethtool_ops(struct net_device *netdev);
+int ixgbe_setup_rx_resources(struct ixgbe_ring *);
+int ixgbe_setup_tx_resources(struct ixgbe_ring *);
+void ixgbe_free_rx_resources(struct ixgbe_ring *);
+void ixgbe_free_tx_resources(struct ixgbe_ring *);
+void ixgbe_configure_rx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
+void ixgbe_configure_tx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
+void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_ring *);
+void ixgbe_update_stats(struct ixgbe_adapter *adapter);
+int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
+int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
                               u16 subdevice_id);
-extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
-extern netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *,
-                                        struct ixgbe_adapter *,
-                                        struct ixgbe_ring *);
-extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
-                                             struct ixgbe_tx_buffer *);
-extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
-extern void ixgbe_write_eitr(struct ixgbe_q_vector *);
-extern int ixgbe_poll(struct napi_struct *napi, int budget);
-extern int ethtool_ioctl(struct ifreq *ifr);
-extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
-extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
-extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
-extern s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
-                                                union ixgbe_atr_hash_dword input,
-                                                union ixgbe_atr_hash_dword common,
-                                                 u8 queue);
-extern s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
-                                          union ixgbe_atr_input *input_mask);
-extern s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
-                                                union ixgbe_atr_input *input,
-                                                u16 soft_id, u8 queue);
-extern s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
-                                                union ixgbe_atr_input *input,
-                                                u16 soft_id);
-extern void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
-                                                union ixgbe_atr_input *mask);
-extern bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
-extern void ixgbe_set_rx_mode(struct net_device *netdev);
+void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
+netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, struct ixgbe_adapter *,
+                                 struct ixgbe_ring *);
+void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
+                                     struct ixgbe_tx_buffer *);
+void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
+void ixgbe_write_eitr(struct ixgbe_q_vector *);
+int ixgbe_poll(struct napi_struct *napi, int budget);
+int ethtool_ioctl(struct ifreq *ifr);
+s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
+s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
+s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
+s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
+                                         union ixgbe_atr_hash_dword input,
+                                         union ixgbe_atr_hash_dword common,
+                                         u8 queue);
+s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
+                                   union ixgbe_atr_input *input_mask);
+s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
+                                         union ixgbe_atr_input *input,
+                                         u16 soft_id, u8 queue);
+s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
+                                         union ixgbe_atr_input *input,
+                                         u16 soft_id);
+void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
+                                         union ixgbe_atr_input *mask);
+bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
+void ixgbe_set_rx_mode(struct net_device *netdev);
 #ifdef CONFIG_IXGBE_DCB
-extern void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
+void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
 #endif
-extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
-extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
-extern void ixgbe_do_reset(struct net_device *netdev);
+int ixgbe_setup_tc(struct net_device *dev, u8 tc);
+void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
+void ixgbe_do_reset(struct net_device *netdev);
 #ifdef CONFIG_IXGBE_HWMON
-extern void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter);
-extern int ixgbe_sysfs_init(struct ixgbe_adapter *adapter);
+void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter);
+int ixgbe_sysfs_init(struct ixgbe_adapter *adapter);
 #endif /* CONFIG_IXGBE_HWMON */
 #ifdef IXGBE_FCOE
-extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
-extern int ixgbe_fso(struct ixgbe_ring *tx_ring,
-                    struct ixgbe_tx_buffer *first,
-                    u8 *hdr_len);
-extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
-                         union ixgbe_adv_rx_desc *rx_desc,
-                         struct sk_buff *skb);
-extern int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
-                              struct scatterlist *sgl, unsigned int sgc);
-extern int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
-                                struct scatterlist *sgl, unsigned int sgc);
-extern int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid);
-extern int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
-extern void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
-extern int ixgbe_fcoe_enable(struct net_device *netdev);
-extern int ixgbe_fcoe_disable(struct net_device *netdev);
+void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
+int ixgbe_fso(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first,
+             u8 *hdr_len);
+int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
+                  union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb);
+int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
+                      struct scatterlist *sgl, unsigned int sgc);
+int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
+                         struct scatterlist *sgl, unsigned int sgc);
+int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid);
+int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
+void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
+int ixgbe_fcoe_enable(struct net_device *netdev);
+int ixgbe_fcoe_disable(struct net_device *netdev);
 #ifdef CONFIG_IXGBE_DCB
-extern u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter);
-extern u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up);
+u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter);
+u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up);
 #endif /* CONFIG_IXGBE_DCB */
-extern int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);
-extern int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
-                                 struct netdev_fcoe_hbainfo *info);
-extern u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter);
+int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);
+int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
+                          struct netdev_fcoe_hbainfo *info);
+u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter);
 #endif /* IXGBE_FCOE */
 #ifdef CONFIG_DEBUG_FS
-extern void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter);
-extern void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter);
-extern void ixgbe_dbg_init(void);
-extern void ixgbe_dbg_exit(void);
+void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter);
+void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter);
+void ixgbe_dbg_init(void);
+void ixgbe_dbg_exit(void);
 #else
 static inline void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter) {}
 static inline void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter) {}
@@ -884,12 +880,12 @@ static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)
        return netdev_get_tx_queue(ring->netdev, ring->queue_index);
 }
 
-extern void ixgbe_ptp_init(struct ixgbe_adapter *adapter);
-extern void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
-extern void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
-extern void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter);
-extern void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
-                                   struct sk_buff *skb);
+void ixgbe_ptp_init(struct ixgbe_adapter *adapter);
+void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
+void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
+void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter);
+void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
+                            struct sk_buff *skb);
 static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring,
                                         union ixgbe_adv_rx_desc *rx_desc,
                                         struct sk_buff *skb)
@@ -906,11 +902,11 @@ static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring,
        rx_ring->last_rx_timestamp = jiffies;
 }
 
-extern int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
-                                   struct ifreq *ifr, int cmd);
-extern void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
-extern void ixgbe_ptp_reset(struct ixgbe_adapter *adapter);
-extern void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr);
+int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter, struct ifreq *ifr,
+                            int cmd);
+void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
+void ixgbe_ptp_reset(struct ixgbe_adapter *adapter);
+void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr);
 #ifdef CONFIG_PCI_IOV
 void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter);
 #endif
index e8649abf97c0dd93152d7037bae870eaf3532954..90aac31b3551746d8fd3bd95d61d5000d2fc4a1a 100644 (file)
@@ -442,7 +442,7 @@ static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
 
 static int ixgbe_get_regs_len(struct net_device *netdev)
 {
-#define IXGBE_REGS_LEN  1129
+#define IXGBE_REGS_LEN  1139
        return IXGBE_REGS_LEN * sizeof(u32);
 }
 
@@ -602,22 +602,53 @@ static void ixgbe_get_regs(struct net_device *netdev,
        regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));
 
        /* DCB */
-       regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);
-       regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
-       regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
-       regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
-       for (i = 0; i < 8; i++)
-               regs_buff[833 + i] = IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
-       for (i = 0; i < 8; i++)
-               regs_buff[841 + i] = IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
-       for (i = 0; i < 8; i++)
-               regs_buff[849 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
-       for (i = 0; i < 8; i++)
-               regs_buff[857 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
+       regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);   /* same as FCCFG  */
+       regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); /* same as RTTPCS */
+
+       switch (hw->mac.type) {
+       case ixgbe_mac_82598EB:
+               regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
+               regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
+               for (i = 0; i < 8; i++)
+                       regs_buff[833 + i] =
+                               IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
+               for (i = 0; i < 8; i++)
+                       regs_buff[841 + i] =
+                               IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
+               for (i = 0; i < 8; i++)
+                       regs_buff[849 + i] =
+                               IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
+               for (i = 0; i < 8; i++)
+                       regs_buff[857 + i] =
+                               IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
+               break;
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
+               regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
+               regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RTRPCS);
+               for (i = 0; i < 8; i++)
+                       regs_buff[833 + i] =
+                               IXGBE_READ_REG(hw, IXGBE_RTRPT4C(i));
+               for (i = 0; i < 8; i++)
+                       regs_buff[841 + i] =
+                               IXGBE_READ_REG(hw, IXGBE_RTRPT4S(i));
+               for (i = 0; i < 8; i++)
+                       regs_buff[849 + i] =
+                               IXGBE_READ_REG(hw, IXGBE_RTTDT2C(i));
+               for (i = 0; i < 8; i++)
+                       regs_buff[857 + i] =
+                               IXGBE_READ_REG(hw, IXGBE_RTTDT2S(i));
+               break;
+       default:
+               break;
+       }
+
        for (i = 0; i < 8; i++)
-               regs_buff[865 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i));
+               regs_buff[865 + i] =
+               IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i)); /* same as RTTPT2C */
        for (i = 0; i < 8; i++)
-               regs_buff[873 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i));
+               regs_buff[873 + i] =
+               IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i)); /* same as RTTPT2S */
 
        /* Statistics */
        regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs);
@@ -757,6 +788,20 @@ static void ixgbe_get_regs(struct net_device *netdev,
 
        /* 82599 X540 specific registers  */
        regs_buff[1128] = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+
+       /* 82599 X540 specific DCB registers  */
+       regs_buff[1129] = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
+       regs_buff[1130] = IXGBE_READ_REG(hw, IXGBE_RTTUP2TC);
+       for (i = 0; i < 4; i++)
+               regs_buff[1131 + i] = IXGBE_READ_REG(hw, IXGBE_TXLLQ(i));
+       regs_buff[1135] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRM);
+                                       /* same as RTTQCNRM */
+       regs_buff[1136] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRD);
+                                       /* same as RTTQCNRR */
+
+       /* X540 specific DCB registers  */
+       regs_buff[1137] = IXGBE_READ_REG(hw, IXGBE_RTTQCNCR);
+       regs_buff[1138] = IXGBE_READ_REG(hw, IXGBE_RTTQCNTG);
 }
 
 static int ixgbe_get_eeprom_len(struct net_device *netdev)
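
The register-dump rework above reads the DCB block from the per-MAC register sets (the legacy 82598 addresses versus their RTT*/RTRP* equivalents on 82599/X540) and appends ten new entries at indices 1129-1138, which is exactly what the IXGBE_REGS_LEN bump from 1129 to 1139 in the earlier hunk accounts for.
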
@@ -1072,7 +1117,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
                        data[i] = 0;
                        data[i+1] = 0;
                        i += 2;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
                        data[i] = 0;
                        data[i+1] = 0;
                        data[i+2] = 0;
@@ -1087,7 +1132,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
                        data[i+1] = ring->stats.bytes;
                } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
                i += 2;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
                data[i] = ring->stats.yields;
                data[i+1] = ring->stats.misses;
                data[i+2] = ring->stats.cleaned;
@@ -1100,7 +1145,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
                        data[i] = 0;
                        data[i+1] = 0;
                        i += 2;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
                        data[i] = 0;
                        data[i+1] = 0;
                        data[i+2] = 0;
@@ -1115,7 +1160,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
                        data[i+1] = ring->stats.bytes;
                } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
                i += 2;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
                data[i] = ring->stats.yields;
                data[i+1] = ring->stats.misses;
                data[i+2] = ring->stats.cleaned;
@@ -1157,28 +1202,28 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
                        p += ETH_GSTRING_LEN;
                        sprintf(p, "tx_queue_%u_bytes", i);
                        p += ETH_GSTRING_LEN;
-#ifdef LL_EXTENDED_STATS
-                       sprintf(p, "tx_queue_%u_ll_napi_yield", i);
+#ifdef BP_EXTENDED_STATS
+                       sprintf(p, "tx_queue_%u_bp_napi_yield", i);
                        p += ETH_GSTRING_LEN;
-                       sprintf(p, "tx_queue_%u_ll_misses", i);
+                       sprintf(p, "tx_queue_%u_bp_misses", i);
                        p += ETH_GSTRING_LEN;
-                       sprintf(p, "tx_queue_%u_ll_cleaned", i);
+                       sprintf(p, "tx_queue_%u_bp_cleaned", i);
                        p += ETH_GSTRING_LEN;
-#endif /* LL_EXTENDED_STATS */
+#endif /* BP_EXTENDED_STATS */
                }
                for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) {
                        sprintf(p, "rx_queue_%u_packets", i);
                        p += ETH_GSTRING_LEN;
                        sprintf(p, "rx_queue_%u_bytes", i);
                        p += ETH_GSTRING_LEN;
-#ifdef LL_EXTENDED_STATS
-                       sprintf(p, "rx_queue_%u_ll_poll_yield", i);
+#ifdef BP_EXTENDED_STATS
+                       sprintf(p, "rx_queue_%u_bp_poll_yield", i);
                        p += ETH_GSTRING_LEN;
-                       sprintf(p, "rx_queue_%u_ll_misses", i);
+                       sprintf(p, "rx_queue_%u_bp_misses", i);
                        p += ETH_GSTRING_LEN;
-                       sprintf(p, "rx_queue_%u_ll_cleaned", i);
+                       sprintf(p, "rx_queue_%u_bp_cleaned", i);
                        p += ETH_GSTRING_LEN;
-#endif /* LL_EXTENDED_STATS */
+#endif /* BP_EXTENDED_STATS */
                }
                for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
                        sprintf(p, "tx_pb_%u_pxon", i);
index 0ade0cd5ef53ffab28b3fd34136374bfe9f4b51e..43b777aad2880857682353f83cd946048ee2278b 100644
@@ -1585,7 +1585,7 @@ static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
 {
        struct ixgbe_adapter *adapter = q_vector->adapter;
 
-       if (ixgbe_qv_ll_polling(q_vector))
+       if (ixgbe_qv_busy_polling(q_vector))
                netif_receive_skb(skb);
        else if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
                napi_gro_receive(&q_vector->napi, skb);
@@ -2097,7 +2097,7 @@ static int ixgbe_low_latency_recv(struct napi_struct *napi)
 
        ixgbe_for_each_ring(ring, q_vector->rx) {
                found = ixgbe_clean_rx_irq(q_vector, ring, 4);
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
                if (found)
                        ring->stats.cleaned += found;
                else
index 24af12e3719e00c8345ba750399226a8522105de..aae900a256da98601a501a121eed1f14347d3997 100644
 #define IXGBE_SFF_QSFP_DEVICE_TECH     0x93
 
 /* Bitmasks */
-#define IXGBE_SFF_DA_PASSIVE_CABLE           0x4
-#define IXGBE_SFF_DA_ACTIVE_CABLE            0x8
-#define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING    0x4
-#define IXGBE_SFF_1GBASESX_CAPABLE           0x1
-#define IXGBE_SFF_1GBASELX_CAPABLE           0x2
-#define IXGBE_SFF_1GBASET_CAPABLE            0x8
-#define IXGBE_SFF_10GBASESR_CAPABLE          0x10
-#define IXGBE_SFF_10GBASELR_CAPABLE          0x20
-#define IXGBE_SFF_SOFT_RS_SELECT_MASK  0x8
-#define IXGBE_SFF_SOFT_RS_SELECT_10G   0x8
-#define IXGBE_SFF_SOFT_RS_SELECT_1G    0x0
-#define IXGBE_SFF_ADDRESSING_MODE           0x4
-#define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE       0x1
-#define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE      0x8
+#define IXGBE_SFF_DA_PASSIVE_CABLE             0x4
+#define IXGBE_SFF_DA_ACTIVE_CABLE              0x8
+#define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING      0x4
+#define IXGBE_SFF_1GBASESX_CAPABLE             0x1
+#define IXGBE_SFF_1GBASELX_CAPABLE             0x2
+#define IXGBE_SFF_1GBASET_CAPABLE              0x8
+#define IXGBE_SFF_10GBASESR_CAPABLE            0x10
+#define IXGBE_SFF_10GBASELR_CAPABLE            0x20
+#define IXGBE_SFF_SOFT_RS_SELECT_MASK          0x8
+#define IXGBE_SFF_SOFT_RS_SELECT_10G           0x8
+#define IXGBE_SFF_SOFT_RS_SELECT_1G            0x0
+#define IXGBE_SFF_ADDRESSING_MODE              0x4
+#define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE         0x1
+#define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE                0x8
 #define IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE 0x23
 #define IXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL  0x0
-#define IXGBE_I2C_EEPROM_READ_MASK           0x100
-#define IXGBE_I2C_EEPROM_STATUS_MASK         0x3
-#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0
-#define IXGBE_I2C_EEPROM_STATUS_PASS         0x1
-#define IXGBE_I2C_EEPROM_STATUS_FAIL         0x2
-#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS  0x3
+#define IXGBE_I2C_EEPROM_READ_MASK             0x100
+#define IXGBE_I2C_EEPROM_STATUS_MASK           0x3
+#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION   0x0
+#define IXGBE_I2C_EEPROM_STATUS_PASS           0x1
+#define IXGBE_I2C_EEPROM_STATUS_FAIL           0x2
+#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS    0x3
 
 /* Flow control defines */
 #define IXGBE_TAF_SYM_PAUSE                  0x400
index 276d7b135332c1c1c0ec4415f0594f15ffd9f1d2..1fe7cb0142e106919e772f668fe5a5411b8ff2fc 100644
@@ -558,7 +558,7 @@ static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
        struct ixgbe_hw *hw = &adapter->hw;
        int rar_entry = hw->mac.num_rar_entries - (vf + 1);
 
-       memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, 6);
+       memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
        hw->mac.ops.set_rar(hw, rar_entry, mac_addr, vf, IXGBE_RAH_AV);
 
        return 0;
@@ -621,16 +621,13 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
 
 int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
 {
-       unsigned char vf_mac_addr[6];
        struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
        unsigned int vfn = (event_mask & 0x3f);
 
        bool enable = ((event_mask & 0x10000000U) != 0);
 
-       if (enable) {
-               eth_zero_addr(vf_mac_addr);
-               memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6);
-       }
+       if (enable)
+               eth_zero_addr(adapter->vfinfo[vfn].vf_mac_addresses);
 
        return 0;
 }
index 10775cb9b6d84c066263c51881257bf8995e037a..7c19e969576f60160649f3ea95b6033582ed9552 100644
@@ -561,6 +561,10 @@ struct ixgbe_thermal_sensor_data {
 #define IXGBE_RTTDQSEL    0x04904
 #define IXGBE_RTTDT1C     0x04908
 #define IXGBE_RTTDT1S     0x0490C
+#define IXGBE_RTTQCNCR    0x08B00
+#define IXGBE_RTTQCNTG    0x04A90
+#define IXGBE_RTTBCNRD    0x0498C
+#define IXGBE_RTTQCNRR    0x0498C
 #define IXGBE_RTTDTECC    0x04990
 #define IXGBE_RTTDTECC_NO_BCN   0x00000100
 #define IXGBE_RTTBCNRC    0x04984
@@ -570,6 +574,7 @@ struct ixgbe_thermal_sensor_data {
 #define IXGBE_RTTBCNRC_RF_INT_MASK     \
        (IXGBE_RTTBCNRC_RF_DEC_MASK << IXGBE_RTTBCNRC_RF_INT_SHIFT)
 #define IXGBE_RTTBCNRM    0x04980
+#define IXGBE_RTTQCNRM    0x04980
 
 /* FCoE DMA Context Registers */
 #define IXGBE_FCPTRL    0x02410 /* FC User Desc. PTR Low */
index 389324f5929a5ef9fd16bac01b9a6d799943747a..24b80a6cfca4ec593f7bed4bf2e1423213125d90 100644
 #include "ixgbe.h"
 #include "ixgbe_phy.h"
 
-#define IXGBE_X540_MAX_TX_QUEUES 128
-#define IXGBE_X540_MAX_RX_QUEUES 128
-#define IXGBE_X540_RAR_ENTRIES   128
-#define IXGBE_X540_MC_TBL_SIZE   128
-#define IXGBE_X540_VFT_TBL_SIZE  128
-#define IXGBE_X540_RX_PB_SIZE   384
+#define IXGBE_X540_MAX_TX_QUEUES       128
+#define IXGBE_X540_MAX_RX_QUEUES       128
+#define IXGBE_X540_RAR_ENTRIES         128
+#define IXGBE_X540_MC_TBL_SIZE         128
+#define IXGBE_X540_VFT_TBL_SIZE                128
+#define IXGBE_X540_RX_PB_SIZE          384
 
 static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw);
 static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw);
index c9d0c12d6f04156e19ec0240c19d64c0f23ebc8a..84329b0d567a1caeb55fb197e6f4bfe0e07944f1 100644
@@ -140,58 +140,10 @@ static void ixgbevf_set_msglevel(struct net_device *netdev, u32 data)
 
 #define IXGBE_GET_STAT(_A_, _R_) (_A_->stats._R_)
 
-static char *ixgbevf_reg_names[] = {
-       "IXGBE_VFCTRL",
-       "IXGBE_VFSTATUS",
-       "IXGBE_VFLINKS",
-       "IXGBE_VFRXMEMWRAP",
-       "IXGBE_VFFRTIMER",
-       "IXGBE_VTEICR",
-       "IXGBE_VTEICS",
-       "IXGBE_VTEIMS",
-       "IXGBE_VTEIMC",
-       "IXGBE_VTEIAC",
-       "IXGBE_VTEIAM",
-       "IXGBE_VTEITR",
-       "IXGBE_VTIVAR",
-       "IXGBE_VTIVAR_MISC",
-       "IXGBE_VFRDBAL0",
-       "IXGBE_VFRDBAL1",
-       "IXGBE_VFRDBAH0",
-       "IXGBE_VFRDBAH1",
-       "IXGBE_VFRDLEN0",
-       "IXGBE_VFRDLEN1",
-       "IXGBE_VFRDH0",
-       "IXGBE_VFRDH1",
-       "IXGBE_VFRDT0",
-       "IXGBE_VFRDT1",
-       "IXGBE_VFRXDCTL0",
-       "IXGBE_VFRXDCTL1",
-       "IXGBE_VFSRRCTL0",
-       "IXGBE_VFSRRCTL1",
-       "IXGBE_VFPSRTYPE",
-       "IXGBE_VFTDBAL0",
-       "IXGBE_VFTDBAL1",
-       "IXGBE_VFTDBAH0",
-       "IXGBE_VFTDBAH1",
-       "IXGBE_VFTDLEN0",
-       "IXGBE_VFTDLEN1",
-       "IXGBE_VFTDH0",
-       "IXGBE_VFTDH1",
-       "IXGBE_VFTDT0",
-       "IXGBE_VFTDT1",
-       "IXGBE_VFTXDCTL0",
-       "IXGBE_VFTXDCTL1",
-       "IXGBE_VFTDWBAL0",
-       "IXGBE_VFTDWBAL1",
-       "IXGBE_VFTDWBAH0",
-       "IXGBE_VFTDWBAH1"
-};
-
-
 static int ixgbevf_get_regs_len(struct net_device *netdev)
 {
-       return (ARRAY_SIZE(ixgbevf_reg_names)) * sizeof(u32);
+#define IXGBE_REGS_LEN 45
+       return IXGBE_REGS_LEN * sizeof(u32);
 }
 
 static void ixgbevf_get_regs(struct net_device *netdev,
@@ -264,9 +216,6 @@ static void ixgbevf_get_regs(struct net_device *netdev,
                regs_buff[41 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAL(i));
        for (i = 0; i < 2; i++)
                regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAH(i));
-
-       for (i = 0; i < ARRAY_SIZE(ixgbevf_reg_names); i++)
-               hw_dbg(hw, "%s\t%8.8x\n", ixgbevf_reg_names[i], regs_buff[i]);
 }
 
 static void ixgbevf_get_drvinfo(struct net_device *netdev,
index fff0d98675295182fd15b7bbba36820274286ea2..64a2b912e73c4cf6c09a71571b3d6b321375ec7b 100644
@@ -281,27 +281,23 @@ extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops;
 extern const char ixgbevf_driver_name[];
 extern const char ixgbevf_driver_version[];
 
-extern void ixgbevf_up(struct ixgbevf_adapter *adapter);
-extern void ixgbevf_down(struct ixgbevf_adapter *adapter);
-extern void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter);
-extern void ixgbevf_reset(struct ixgbevf_adapter *adapter);
-extern void ixgbevf_set_ethtool_ops(struct net_device *netdev);
-extern int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *,
-                                     struct ixgbevf_ring *);
-extern int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *,
-                                     struct ixgbevf_ring *);
-extern void ixgbevf_free_rx_resources(struct ixgbevf_adapter *,
-                                     struct ixgbevf_ring *);
-extern void ixgbevf_free_tx_resources(struct ixgbevf_adapter *,
-                                     struct ixgbevf_ring *);
-extern void ixgbevf_update_stats(struct ixgbevf_adapter *adapter);
-extern int ethtool_ioctl(struct ifreq *ifr);
-
-extern void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter);
-extern void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter);
+void ixgbevf_up(struct ixgbevf_adapter *adapter);
+void ixgbevf_down(struct ixgbevf_adapter *adapter);
+void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter);
+void ixgbevf_reset(struct ixgbevf_adapter *adapter);
+void ixgbevf_set_ethtool_ops(struct net_device *netdev);
+int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *);
+int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *);
+void ixgbevf_free_rx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *);
+void ixgbevf_free_tx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *);
+void ixgbevf_update_stats(struct ixgbevf_adapter *adapter);
+int ethtool_ioctl(struct ifreq *ifr);
+
+void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter);
+void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter);
 
 #ifdef DEBUG
-extern char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw);
+char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw);
 #define hw_dbg(hw, format, arg...) \
        printk(KERN_DEBUG "%s: " format, ixgbevf_get_hw_dev_name(hw), ##arg)
 #else
index 59a62bbfb3714a55683bec8fcb49e9adcb5cb279..ce27d62f9c8e81ae5aeadcb79dfa998bab4bcbf8 100644
@@ -756,37 +756,12 @@ static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
 static irqreturn_t ixgbevf_msix_other(int irq, void *data)
 {
        struct ixgbevf_adapter *adapter = data;
-       struct pci_dev *pdev = adapter->pdev;
        struct ixgbe_hw *hw = &adapter->hw;
-       u32 msg;
-       bool got_ack = false;
 
        hw->mac.get_link_status = 1;
-       if (!hw->mbx.ops.check_for_ack(hw))
-               got_ack = true;
-
-       if (!hw->mbx.ops.check_for_msg(hw)) {
-               hw->mbx.ops.read(hw, &msg, 1);
-
-               if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG) {
-                       mod_timer(&adapter->watchdog_timer,
-                                 round_jiffies(jiffies + 1));
-                       adapter->link_up = false;
-               }
 
-               if (msg & IXGBE_VT_MSGTYPE_NACK)
-                       dev_info(&pdev->dev,
-                                "Last Request of type %2.2x to PF Nacked\n",
-                                msg & 0xFF);
-               hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFSTS;
-       }
-
-       /* checking for the ack clears the PFACK bit.  Place
-        * it back in the v2p_mailbox cache so that anyone
-        * polling for an ack will not miss it
-        */
-       if (got_ack)
-               hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;
+       if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+               mod_timer(&adapter->watchdog_timer, jiffies);
 
        IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
 
@@ -1327,27 +1302,51 @@ static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
        }
 }
 
-#define IXGBE_MAX_RX_DESC_POLL 10
-static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
-                                               int rxr)
+#define IXGBEVF_MAX_RX_DESC_POLL 10
+static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
+                                        int rxr)
 {
        struct ixgbe_hw *hw = &adapter->hw;
+       int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
+       u32 rxdctl;
        int j = adapter->rx_ring[rxr].reg_idx;
-       int k;
 
-       for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
-               if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
-                       break;
-               else
-                       msleep(1);
-       }
-       if (k >= IXGBE_MAX_RX_DESC_POLL) {
-               hw_dbg(hw, "RXDCTL.ENABLE on Rx queue %d "
-                      "not set within the polling period\n", rxr);
-       }
+       do {
+               usleep_range(1000, 2000);
+               rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
+       } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
+
+       if (!wait_loop)
+               hw_dbg(hw, "RXDCTL.ENABLE queue %d not set while polling\n",
+                      rxr);
+
+       ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
+                               (adapter->rx_ring[rxr].count - 1));
+}
+
+static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
+                                    struct ixgbevf_ring *ring)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
+       u32 rxdctl;
+       u8 reg_idx = ring->reg_idx;
 
-       ixgbevf_release_rx_desc(hw, &adapter->rx_ring[rxr],
-                               adapter->rx_ring[rxr].count - 1);
+       rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
+       rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+
+       /* write value back with RXDCTL.ENABLE bit cleared */
+       IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
+
+       /* the hardware may take up to 100us to really disable the rx queue */
+       do {
+               udelay(10);
+               rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
+       } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
+
+       if (!wait_loop)
+               hw_dbg(hw, "RXDCTL.ENABLE queue %d not cleared while polling\n",
+                      reg_idx);
 }
 
 static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
@@ -1545,8 +1544,6 @@ void ixgbevf_up(struct ixgbevf_adapter *adapter)
 {
        struct ixgbe_hw *hw = &adapter->hw;
 
-       ixgbevf_negotiate_api(adapter);
-
        ixgbevf_reset_queues(adapter);
 
        ixgbevf_configure(adapter);
@@ -1679,7 +1676,10 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter)
 
        /* signal that we are down to the interrupt handler */
        set_bit(__IXGBEVF_DOWN, &adapter->state);
-       /* disable receives */
+
+       /* disable all enabled rx queues */
+       for (i = 0; i < adapter->num_rx_queues; i++)
+               ixgbevf_disable_rx_queue(adapter, &adapter->rx_ring[i]);
 
        netif_tx_disable(netdev);
 
@@ -1733,10 +1733,12 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter)
        struct ixgbe_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;
 
-       if (hw->mac.ops.reset_hw(hw))
+       if (hw->mac.ops.reset_hw(hw)) {
                hw_dbg(hw, "PF still resetting\n");
-       else
+       } else {
                hw->mac.ops.init_hw(hw);
+               ixgbevf_negotiate_api(adapter);
+       }
 
        if (is_valid_ether_addr(adapter->hw.mac.addr)) {
                memcpy(netdev->dev_addr, adapter->hw.mac.addr,
@@ -2072,6 +2074,9 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
        hw->mac.max_tx_queues = 2;
        hw->mac.max_rx_queues = 2;
 
+       /* lock to protect mailbox accesses */
+       spin_lock_init(&adapter->mbx_lock);
+
        err = hw->mac.ops.reset_hw(hw);
        if (err) {
                dev_info(&pdev->dev,
@@ -2082,6 +2087,7 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
                        pr_err("init_shared_code failed: %d\n", err);
                        goto out;
                }
+               ixgbevf_negotiate_api(adapter);
                err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
                if (err)
                        dev_info(&pdev->dev, "Error reading MAC address\n");
@@ -2097,9 +2103,6 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
                memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len);
        }
 
-       /* lock to protect mailbox accesses */
-       spin_lock_init(&adapter->mbx_lock);
-
        /* Enable dynamic interrupt throttling rates */
        adapter->rx_itr_setting = 1;
        adapter->tx_itr_setting = 1;
@@ -2620,8 +2623,6 @@ static int ixgbevf_open(struct net_device *netdev)
                }
        }
 
-       ixgbevf_negotiate_api(adapter);
-
        /* setup queue reg_idx and Rx queue count */
        err = ixgbevf_setup_queues(adapter);
        if (err)
@@ -3216,6 +3217,8 @@ static int ixgbevf_resume(struct pci_dev *pdev)
        }
        pci_set_master(pdev);
 
+       ixgbevf_reset(adapter);
+
        rtnl_lock();
        err = ixgbevf_init_interrupt_scheme(adapter);
        rtnl_unlock();
@@ -3224,8 +3227,6 @@ static int ixgbevf_resume(struct pci_dev *pdev)
                return err;
        }
 
-       ixgbevf_reset(adapter);
-
        if (netif_running(netdev)) {
                err = ixgbevf_open(netdev);
                if (err)
index 387b52635bc051259a5e5b99d9740c69bcca687a..4d44d64ae3870c42dd62bb4d1cdfeb96fe9122b9 100644
@@ -242,7 +242,7 @@ static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
        msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;
        msgbuf[0] |= IXGBE_VF_SET_MACVLAN;
        if (addr)
-               memcpy(msg_addr, addr, 6);
+               memcpy(msg_addr, addr, ETH_ALEN);
        ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
 
        if (!ret_val)
@@ -275,7 +275,7 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
 
        memset(msgbuf, 0, sizeof(msgbuf));
        msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
-       memcpy(msg_addr, addr, 6);
+       memcpy(msg_addr, addr, ETH_ALEN);
        ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
 
        if (!ret_val)
index 23de82a9da82ff408181759505450adfd439e88d..b56d2a29cd0ed6ea5a062e477de525791ae1489d 100644
@@ -309,7 +309,7 @@ static void
 jme_load_macaddr(struct net_device *netdev)
 {
        struct jme_adapter *jme = netdev_priv(netdev);
-       unsigned char macaddr[6];
+       unsigned char macaddr[ETH_ALEN];
        u32 val;
 
        spin_lock_bh(&jme->macaddr_lock);
@@ -321,7 +321,7 @@ jme_load_macaddr(struct net_device *netdev)
        val = jread32(jme, JME_RXUMA_HI);
        macaddr[4] = (val >>  0) & 0xFF;
        macaddr[5] = (val >>  8) & 0xFF;
-       memcpy(netdev->dev_addr, macaddr, 6);
+       memcpy(netdev->dev_addr, macaddr, ETH_ALEN);
        spin_unlock_bh(&jme->macaddr_lock);
 }
 
index a36fa80968eb1a0a42e9051b8ca2ea91ece4840c..4a5e3b0f712e82e7253f92c7ef19ebd282f5dd4d 100644
@@ -1110,7 +1110,7 @@ static int korina_probe(struct platform_device *pdev)
        lp = netdev_priv(dev);
 
        bif->dev = dev;
-       memcpy(dev->dev_addr, bif->mac, 6);
+       memcpy(dev->dev_addr, bif->mac, ETH_ALEN);
 
        lp->rx_irq = platform_get_irq_byname(pdev, "korina_rx");
        lp->tx_irq = platform_get_irq_byname(pdev, "korina_tx");
index 7fb5677451f9fb7aee3ebccd31a82b476cfac4df..99f16cbf2fd84dd5b83656185fd24627227b4f54 100644
@@ -2514,7 +2514,7 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
 
        mac_addr = of_get_mac_address(pnp);
        if (mac_addr)
-               memcpy(ppd.mac_addr, mac_addr, 6);
+               memcpy(ppd.mac_addr, mac_addr, ETH_ALEN);
 
        mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size);
        mv643xx_eth_property(pnp, "tx-sram-addr", ppd.tx_sram_addr);
@@ -2696,7 +2696,7 @@ static void set_params(struct mv643xx_eth_private *mp,
        struct net_device *dev = mp->dev;
 
        if (is_valid_ether_addr(pd->mac_addr))
-               memcpy(dev->dev_addr, pd->mac_addr, 6);
+               memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
        else
                uc_addr_get(mp, dev->dev_addr);
 
index 1a9c4f6269ea8a3422781bc14f2f7164b57ff7bf..ecc7f7b696b89a122b80318a12d5f6488a190126 100644
@@ -3086,13 +3086,16 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
                                               PCI_DMA_FROMDEVICE);
                skge_rx_reuse(e, skge->rx_buf_size);
        } else {
+               struct skge_element ee;
                struct sk_buff *nskb;
 
                nskb = netdev_alloc_skb_ip_align(dev, skge->rx_buf_size);
                if (!nskb)
                        goto resubmit;
 
-               skb = e->skb;
+               ee = *e;
+
+               skb = ee.skb;
                prefetch(skb->data);
 
                if (skge_rx_setup(skge, e, nskb, skge->rx_buf_size) < 0) {
@@ -3101,8 +3104,8 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
                }
 
                pci_unmap_single(skge->hw->pdev,
-                                dma_unmap_addr(e, mapaddr),
-                                dma_unmap_len(e, maplen),
+                                dma_unmap_addr(&ee, mapaddr),
+                                dma_unmap_len(&ee, maplen),
                                 PCI_DMA_FROMDEVICE);
        }
 
index 075f4e21d33df6f4f2b749dcd4aeb9fee1405077..c83d16dc7cd56034de4bfbc7deafcad6ddb42439 100644
@@ -1248,7 +1248,7 @@ static void ks_set_mac(struct ks_net *ks, u8 *data)
        w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
        ks_wrreg16(ks, KS_MARL, w);
 
-       memcpy(ks->mac_addr, data, 6);
+       memcpy(ks->mac_addr, data, ETH_ALEN);
 
        if (ks->enabled)
                ks_start_rx(ks);
@@ -1651,7 +1651,7 @@ static int ks8851_probe(struct platform_device *pdev)
        }
        netdev_info(netdev, "Mac address is: %pM\n", ks->mac_addr);
 
-       memcpy(netdev->dev_addr, ks->mac_addr, 6);
+       memcpy(netdev->dev_addr, ks->mac_addr, ETH_ALEN);
 
        ks_set_mac(ks, netdev->dev_addr);
 
index 83c2091c9c234bfe9ecbb4067d1396aee19f49f6..bd1a2d2bc2aebbad9612d167915fd743cf973c94 100644
@@ -543,7 +543,7 @@ static const struct of_device_id moxart_mac_match[] = {
        { }
 };
 
-struct __initdata platform_driver moxart_mac_driver = {
+static struct platform_driver moxart_mac_driver = {
        .probe  = moxart_mac_probe,
        .remove = moxart_remove,
        .driver = {
index 149355b52ad0c8f2d4aac4f6cfe83041bc59f45b..1975550c3634c2b55ae63469b685870a3b0e06c5 100644
@@ -934,7 +934,7 @@ static inline void myri10ge_ss_init_lock(struct myri10ge_slice_state *ss)
 
 static inline bool myri10ge_ss_lock_napi(struct myri10ge_slice_state *ss)
 {
-       int rc = true;
+       bool rc = true;
        spin_lock(&ss->lock);
        if ((ss->state & SLICE_LOCKED)) {
                WARN_ON((ss->state & SLICE_STATE_NAPI));
@@ -957,7 +957,7 @@ static inline void myri10ge_ss_unlock_napi(struct myri10ge_slice_state *ss)
 
 static inline bool myri10ge_ss_lock_poll(struct myri10ge_slice_state *ss)
 {
-       int rc = true;
+       bool rc = true;
        spin_lock_bh(&ss->lock);
        if ((ss->state & SLICE_LOCKED)) {
                ss->state |= SLICE_STATE_POLL_YIELD;
@@ -3164,7 +3164,7 @@ static void myri10ge_set_multicast_list(struct net_device *dev)
 
        /* Walk the multicast list, and add each address */
        netdev_for_each_mc_addr(ha, dev) {
-               memcpy(data, &ha->addr, 6);
+               memcpy(data, &ha->addr, ETH_ALEN);
                cmd.data0 = ntohl(data[0]);
                cmd.data1 = ntohl(data[1]);
                err = myri10ge_send_cmd(mgp, MXGEFW_JOIN_MULTICAST_GROUP,
@@ -3207,7 +3207,7 @@ static int myri10ge_set_mac_address(struct net_device *dev, void *addr)
        }
 
        /* change the dev structure */
-       memcpy(dev->dev_addr, sa->sa_data, 6);
+       memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
        return 0;
 }
 
index 6797b1075874ae26cca6756bf450559ed7c7e6be..2a9003071d51a985be9b133fea76a585dabdf2d4 100644
@@ -653,38 +653,38 @@ struct pch_gbe_adapter {
 extern const char pch_driver_version[];
 
 /* pch_gbe_main.c */
-extern int pch_gbe_up(struct pch_gbe_adapter *adapter);
-extern void pch_gbe_down(struct pch_gbe_adapter *adapter);
-extern void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter);
-extern void pch_gbe_reset(struct pch_gbe_adapter *adapter);
-extern int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
-                                      struct pch_gbe_tx_ring *txdr);
-extern int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
-                                      struct pch_gbe_rx_ring *rxdr);
-extern void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter,
-                                      struct pch_gbe_tx_ring *tx_ring);
-extern void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
-                                      struct pch_gbe_rx_ring *rx_ring);
-extern void pch_gbe_update_stats(struct pch_gbe_adapter *adapter);
-extern u32 pch_ch_control_read(struct pci_dev *pdev);
-extern void pch_ch_control_write(struct pci_dev *pdev, u32 val);
-extern u32 pch_ch_event_read(struct pci_dev *pdev);
-extern void pch_ch_event_write(struct pci_dev *pdev, u32 val);
-extern u32 pch_src_uuid_lo_read(struct pci_dev *pdev);
-extern u32 pch_src_uuid_hi_read(struct pci_dev *pdev);
-extern u64 pch_rx_snap_read(struct pci_dev *pdev);
-extern u64 pch_tx_snap_read(struct pci_dev *pdev);
-extern int pch_set_station_address(u8 *addr, struct pci_dev *pdev);
+int pch_gbe_up(struct pch_gbe_adapter *adapter);
+void pch_gbe_down(struct pch_gbe_adapter *adapter);
+void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter);
+void pch_gbe_reset(struct pch_gbe_adapter *adapter);
+int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
+                              struct pch_gbe_tx_ring *txdr);
+int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
+                              struct pch_gbe_rx_ring *rxdr);
+void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter,
+                              struct pch_gbe_tx_ring *tx_ring);
+void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
+                              struct pch_gbe_rx_ring *rx_ring);
+void pch_gbe_update_stats(struct pch_gbe_adapter *adapter);
+u32 pch_ch_control_read(struct pci_dev *pdev);
+void pch_ch_control_write(struct pci_dev *pdev, u32 val);
+u32 pch_ch_event_read(struct pci_dev *pdev);
+void pch_ch_event_write(struct pci_dev *pdev, u32 val);
+u32 pch_src_uuid_lo_read(struct pci_dev *pdev);
+u32 pch_src_uuid_hi_read(struct pci_dev *pdev);
+u64 pch_rx_snap_read(struct pci_dev *pdev);
+u64 pch_tx_snap_read(struct pci_dev *pdev);
+int pch_set_station_address(u8 *addr, struct pci_dev *pdev);
 
 /* pch_gbe_param.c */
-extern void pch_gbe_check_options(struct pch_gbe_adapter *adapter);
+void pch_gbe_check_options(struct pch_gbe_adapter *adapter);
 
 /* pch_gbe_ethtool.c */
-extern void pch_gbe_set_ethtool_ops(struct net_device *netdev);
+void pch_gbe_set_ethtool_ops(struct net_device *netdev);
 
 /* pch_gbe_mac.c */
-extern s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw);
-extern s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw);
-extern u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw,
-                                 u32 addr, u32 dir, u32 reg, u16 data);
+s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw);
+s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw);
+u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg,
+                         u16 data);
 #endif /* _PCH_GBE_H_ */
index 32675e16021e8aafd2a5598473cc2ad54dae7668..9adcdbb49476e53face68f353dd7261ffea356b3 100644
@@ -53,8 +53,8 @@
 
 #define _NETXEN_NIC_LINUX_MAJOR 4
 #define _NETXEN_NIC_LINUX_MINOR 0
-#define _NETXEN_NIC_LINUX_SUBVERSION 81
-#define NETXEN_NIC_LINUX_VERSIONID  "4.0.81"
+#define _NETXEN_NIC_LINUX_SUBVERSION 82
+#define NETXEN_NIC_LINUX_VERSIONID  "4.0.82"
 
 #define NETXEN_VERSION_CODE(a, b, c)   (((a) << 24) + ((b) << 16) + (c))
 #define _major(v)      (((v) >> 24) & 0xff)
@@ -1883,9 +1883,8 @@ static inline u32 netxen_tx_avail(struct nx_host_tx_ring *tx_ring)
 
 int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, u64 *mac);
 int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, u64 *mac);
-extern void netxen_change_ringparam(struct netxen_adapter *adapter);
-extern int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr,
-                               int *valp);
+void netxen_change_ringparam(struct netxen_adapter *adapter);
+int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp);
 
 extern const struct ethtool_ops netxen_nic_ethtool_ops;
 
index 32c790659f9c1ba0410614c30818eb9cf20a46ee..0c64c82b9acffdcfd58b9471f971d0eebe95a9cc 100644
@@ -958,6 +958,7 @@ enum {
 #define NETXEN_PEG_HALT_STATUS2        (NETXEN_CAM_RAM(0xac))
 #define NX_CRB_DEV_REF_COUNT           (NETXEN_CAM_RAM(0x138))
 #define NX_CRB_DEV_STATE               (NETXEN_CAM_RAM(0x140))
+#define NETXEN_ULA_KEY                 (NETXEN_CAM_RAM(0x178))
 
 /* MiniDIMM related macros */
 #define NETXEN_DIMM_CAPABILITY         (NETXEN_CAM_RAM(0x258))
index 8375cbde996976047475b9c5e6c16f33fd365df4..67efe754367de10d78e6bb1cbacd65caa6f1524c 100644
@@ -648,7 +648,7 @@ nx_p3_sre_macaddr_change(struct netxen_adapter *adapter, u8 *addr, unsigned op)
 
        mac_req = (nx_mac_req_t *)&req.words[0];
        mac_req->op = op;
-       memcpy(mac_req->mac_addr, addr, 6);
+       memcpy(mac_req->mac_addr, addr, ETH_ALEN);
 
        return netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
 }
index cbd75f97ffb3c6d32624e15b81adcb05d5d4b1d9..5ec21c50373cc90d84d35ab4082c013fb7dc6a10 100644
@@ -1415,6 +1415,32 @@ netxen_setup_netdev(struct netxen_adapter *adapter,
        return 0;
 }
 
+#define NETXEN_ULA_ADAPTER_KEY         (0xdaddad01)
+#define NETXEN_NON_ULA_ADAPTER_KEY     (0xdaddad00)
+
+static void netxen_read_ula_info(struct netxen_adapter *adapter)
+{
+       u32 temp;
+
+       /* Print ULA info only once for an adapter */
+       if (adapter->portnum != 0)
+               return;
+
+       temp = NXRD32(adapter, NETXEN_ULA_KEY);
+       switch (temp) {
+       case NETXEN_ULA_ADAPTER_KEY:
+               dev_info(&adapter->pdev->dev, "ULA adapter");
+               break;
+       case NETXEN_NON_ULA_ADAPTER_KEY:
+               dev_info(&adapter->pdev->dev, "non ULA adapter");
+               break;
+       default:
+               break;
+       }
+
+       return;
+}
+
 #ifdef CONFIG_PCIEAER
 static void netxen_mask_aer_correctable(struct netxen_adapter *adapter)
 {
@@ -1561,6 +1587,8 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto err_out_disable_msi;
        }
 
+       netxen_read_ula_info(adapter);
+
        err = netxen_setup_netdev(adapter, netdev);
        if (err)
                goto err_out_disable_msi;
index 4d7ad0074d1c0c1eb1ef1026ab8954bd050f4071..ebe4c86e5230223f0c5465485a9812dc1a0aa3b0 100644
@@ -1794,3 +1794,11 @@ const struct ethtool_ops qlcnic_sriov_vf_ethtool_ops = {
        .set_msglevel           = qlcnic_set_msglevel,
        .get_msglevel           = qlcnic_get_msglevel,
 };
+
+const struct ethtool_ops qlcnic_ethtool_failed_ops = {
+       .get_settings           = qlcnic_get_settings,
+       .get_drvinfo            = qlcnic_get_drvinfo,
+       .set_msglevel           = qlcnic_set_msglevel,
+       .get_msglevel           = qlcnic_get_msglevel,
+       .set_dump               = qlcnic_set_dump,
+};
index f8adc7b01f1f5ef9e62899a9c68c5eea9d2e2cba..73e72eb83bdfb346fd034140b5df7b654d7db193 100644
@@ -445,7 +445,7 @@ int qlcnic_82xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
 
        mac_req = (struct qlcnic_mac_req *)&req.words[0];
        mac_req->op = op;
-       memcpy(mac_req->mac_addr, addr, 6);
+       memcpy(mac_req->mac_addr, addr, ETH_ALEN);
 
        vlan_req = (struct qlcnic_vlan_req *)&req.words[1];
        vlan_req->vlan_id = cpu_to_le16(vlan_id);
index c4c5023e1fdf64aed12efce219e2acb04922b142..21d00a0449a10f394fe6b02d5689c7b0ab3b95cf 100644
@@ -431,6 +431,9 @@ static void qlcnic_82xx_cancel_idc_work(struct qlcnic_adapter *adapter)
        while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
                usleep_range(10000, 11000);
 
+       if (!adapter->fw_work.work.func)
+               return;
+
        cancel_delayed_work_sync(&adapter->fw_work);
 }
 
@@ -2275,8 +2278,9 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                adapter->portnum = adapter->ahw->pci_func;
                err = qlcnic_start_firmware(adapter);
                if (err) {
-                       dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n");
-                       goto err_out_free_hw;
+                       dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n"
+                               "\t\tIf reboot doesn't help, try flashing the card\n");
+                       goto err_out_maintenance_mode;
                }
 
                qlcnic_get_multiq_capability(adapter);
@@ -2408,6 +2412,22 @@ err_out_disable_pdev:
        pci_set_drvdata(pdev, NULL);
        pci_disable_device(pdev);
        return err;
+
+err_out_maintenance_mode:
+       netdev->netdev_ops = &qlcnic_netdev_failed_ops;
+       SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_failed_ops);
+       err = register_netdev(netdev);
+
+       if (err) {
+               dev_err(&pdev->dev, "Failed to register net device\n");
+               qlcnic_clr_all_drv_state(adapter, 0);
+               goto err_out_free_hw;
+       }
+
+       pci_set_drvdata(pdev, adapter);
+       qlcnic_add_sysfs(adapter);
+
+       return 0;
 }
 
 static void qlcnic_remove(struct pci_dev *pdev)
@@ -2518,8 +2538,16 @@ static int qlcnic_resume(struct pci_dev *pdev)
 static int qlcnic_open(struct net_device *netdev)
 {
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       u32 state;
        int err;
 
+       state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
+       if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD) {
+               netdev_err(netdev, "%s: Device is in FAILED state\n", __func__);
+
+               return -EIO;
+       }
+
        netif_carrier_off(netdev);
 
        err = qlcnic_attach(adapter);
@@ -3228,6 +3256,13 @@ void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *adapter, u32 key)
                return;
 
        state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
+       if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD) {
+               netdev_err(adapter->netdev, "%s: Device is in FAILED state\n",
+                          __func__);
+               qlcnic_api_unlock(adapter);
+
+               return;
+       }
 
        if (state == QLCNIC_DEV_READY) {
                QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE,
index 330d9a8774ad69a1d6d8e4d4f38aa87e243bf699..686f460b15022b4b2b7759ad29493a64a1f84592 100644
@@ -397,6 +397,7 @@ static int qlcnic_pci_sriov_disable(struct qlcnic_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
 
+       rtnl_lock();
        if (netif_running(netdev))
                __qlcnic_down(adapter, netdev);
 
@@ -407,12 +408,15 @@ static int qlcnic_pci_sriov_disable(struct qlcnic_adapter *adapter)
        /* After disabling SRIOV re-init the driver in default mode
           configure opmode based on op_mode of function
         */
-       if (qlcnic_83xx_configure_opmode(adapter))
+       if (qlcnic_83xx_configure_opmode(adapter)) {
+               rtnl_unlock();
                return -EIO;
+       }
 
        if (netif_running(netdev))
                __qlcnic_up(adapter, netdev);
 
+       rtnl_unlock();
        return 0;
 }
 
@@ -533,6 +537,7 @@ static int qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter, int num_vfs)
                return -EIO;
        }
 
+       rtnl_lock();
        if (netif_running(netdev))
                __qlcnic_down(adapter, netdev);
 
@@ -555,6 +560,7 @@ static int qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter, int num_vfs)
                __qlcnic_up(adapter, netdev);
 
 error:
+       rtnl_unlock();
        return err;
 }
 
index c6165d05cc13c3806cf60d9ac7513ffdf5832eed..019f4377307f025df8d7f93d3e473e7811f4101a 100644
@@ -1272,6 +1272,7 @@ void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
 void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
 {
        struct device *dev = &adapter->pdev->dev;
+       u32 state;
 
        if (device_create_bin_file(dev, &bin_attr_port_stats))
                dev_info(dev, "failed to create port stats sysfs entry");
@@ -1285,8 +1286,13 @@ void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
        if (device_create_bin_file(dev, &bin_attr_mem))
                dev_info(dev, "failed to create mem sysfs entry\n");
 
+       state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
+       if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD)
+               return;
+
        if (device_create_bin_file(dev, &bin_attr_pci_config))
                dev_info(dev, "failed to create pci config sysfs entry");
+
        if (device_create_file(dev, &dev_attr_beacon))
                dev_info(dev, "failed to create beacon sysfs entry");
 
@@ -1307,6 +1313,7 @@ void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
 void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
 {
        struct device *dev = &adapter->pdev->dev;
+       u32 state;
 
        device_remove_bin_file(dev, &bin_attr_port_stats);
 
@@ -1315,6 +1322,11 @@ void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
        device_remove_file(dev, &dev_attr_diag_mode);
        device_remove_bin_file(dev, &bin_attr_crb);
        device_remove_bin_file(dev, &bin_attr_mem);
+
+       state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
+       if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD)
+               return;
+
        device_remove_bin_file(dev, &bin_attr_pci_config);
        device_remove_file(dev, &dev_attr_beacon);
        if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
index 89943377846699e1e2719be8b2da16b07b144d09..0c9c4e89559524d78aa789dfb65e5e32211b8bc1 100644
@@ -18,7 +18,7 @@
  */
 #define DRV_NAME       "qlge"
 #define DRV_STRING     "QLogic 10 Gigabit PCI-E Ethernet Driver "
-#define DRV_VERSION    "v1.00.00.32"
+#define DRV_VERSION    "1.00.00.33"
 
 #define WQ_ADDR_ALIGN  0x3     /* 4 byte alignment */
 
@@ -2206,14 +2206,14 @@ extern char qlge_driver_name[];
 extern const char qlge_driver_version[];
 extern const struct ethtool_ops qlge_ethtool_ops;
 
-extern int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask);
-extern void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask);
-extern int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
-extern int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
-                              u32 *value);
-extern int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value);
-extern int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
-                       u16 q_id);
+int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask);
+void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask);
+int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
+int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
+                       u32 *value);
+int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value);
+int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
+                u16 q_id);
 void ql_queue_fw_error(struct ql_adapter *qdev);
 void ql_mpi_work(struct work_struct *work);
 void ql_mpi_reset_work(struct work_struct *work);
@@ -2233,10 +2233,9 @@ int ql_unpause_mpi_risc(struct ql_adapter *qdev);
 int ql_pause_mpi_risc(struct ql_adapter *qdev);
 int ql_hard_reset_mpi_risc(struct ql_adapter *qdev);
 int ql_soft_reset_mpi_risc(struct ql_adapter *qdev);
-int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
-               u32 ram_addr, int word_count);
-int ql_core_dump(struct ql_adapter *qdev,
-               struct ql_mpi_coredump *mpi_coredump);
+int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf, u32 ram_addr,
+                         int word_count);
+int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump);
 int ql_mb_about_fw(struct ql_adapter *qdev);
 int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol);
 int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol);
@@ -2249,8 +2248,7 @@ int ql_mb_get_port_cfg(struct ql_adapter *qdev);
 int ql_mb_set_port_cfg(struct ql_adapter *qdev);
 int ql_wait_fifo_empty(struct ql_adapter *qdev);
 void ql_get_dump(struct ql_adapter *qdev, void *buff);
-void ql_gen_reg_dump(struct ql_adapter *qdev,
-                       struct ql_reg_dump *mpi_coredump);
+void ql_gen_reg_dump(struct ql_adapter *qdev, struct ql_reg_dump *mpi_coredump);
 netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev);
 void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *);
 int ql_own_firmware(struct ql_adapter *qdev);
@@ -2264,9 +2262,9 @@ int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget);
 /* #define QL_OB_DUMP */
 
 #ifdef QL_REG_DUMP
-extern void ql_dump_xgmac_control_regs(struct ql_adapter *qdev);
-extern void ql_dump_routing_entries(struct ql_adapter *qdev);
-extern void ql_dump_regs(struct ql_adapter *qdev);
+void ql_dump_xgmac_control_regs(struct ql_adapter *qdev);
+void ql_dump_routing_entries(struct ql_adapter *qdev);
+void ql_dump_regs(struct ql_adapter *qdev);
 #define QL_DUMP_REGS(qdev) ql_dump_regs(qdev)
 #define QL_DUMP_ROUTE(qdev) ql_dump_routing_entries(qdev)
 #define QL_DUMP_XGMAC_CONTROL_REGS(qdev) ql_dump_xgmac_control_regs(qdev)
@@ -2277,26 +2275,26 @@ extern void ql_dump_regs(struct ql_adapter *qdev);
 #endif
 
 #ifdef QL_STAT_DUMP
-extern void ql_dump_stat(struct ql_adapter *qdev);
+void ql_dump_stat(struct ql_adapter *qdev);
 #define QL_DUMP_STAT(qdev) ql_dump_stat(qdev)
 #else
 #define QL_DUMP_STAT(qdev)
 #endif
 
 #ifdef QL_DEV_DUMP
-extern void ql_dump_qdev(struct ql_adapter *qdev);
+void ql_dump_qdev(struct ql_adapter *qdev);
 #define QL_DUMP_QDEV(qdev) ql_dump_qdev(qdev)
 #else
 #define QL_DUMP_QDEV(qdev)
 #endif
 
 #ifdef QL_CB_DUMP
-extern void ql_dump_wqicb(struct wqicb *wqicb);
-extern void ql_dump_tx_ring(struct tx_ring *tx_ring);
-extern void ql_dump_ricb(struct ricb *ricb);
-extern void ql_dump_cqicb(struct cqicb *cqicb);
-extern void ql_dump_rx_ring(struct rx_ring *rx_ring);
-extern void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id);
+void ql_dump_wqicb(struct wqicb *wqicb);
+void ql_dump_tx_ring(struct tx_ring *tx_ring);
+void ql_dump_ricb(struct ricb *ricb);
+void ql_dump_cqicb(struct cqicb *cqicb);
+void ql_dump_rx_ring(struct rx_ring *rx_ring);
+void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id);
 #define QL_DUMP_RICB(ricb) ql_dump_ricb(ricb)
 #define QL_DUMP_WQICB(wqicb) ql_dump_wqicb(wqicb)
 #define QL_DUMP_TX_RING(tx_ring) ql_dump_tx_ring(tx_ring)
@@ -2314,9 +2312,9 @@ extern void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id);
 #endif
 
 #ifdef QL_OB_DUMP
-extern void ql_dump_tx_desc(struct tx_buf_desc *tbd);
-extern void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb);
-extern void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp);
+void ql_dump_tx_desc(struct tx_buf_desc *tbd);
+void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb);
+void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp);
 #define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb) ql_dump_ob_mac_iocb(ob_mac_iocb)
 #define QL_DUMP_OB_MAC_RSP(ob_mac_rsp) ql_dump_ob_mac_rsp(ob_mac_rsp)
 #else
@@ -2325,14 +2323,14 @@ extern void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp);
 #endif
 
 #ifdef QL_IB_DUMP
-extern void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp);
+void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp);
 #define QL_DUMP_IB_MAC_RSP(ib_mac_rsp) ql_dump_ib_mac_rsp(ib_mac_rsp)
 #else
 #define QL_DUMP_IB_MAC_RSP(ib_mac_rsp)
 #endif
 
 #ifdef QL_ALL_DUMP
-extern void ql_dump_all(struct ql_adapter *qdev);
+void ql_dump_all(struct ql_adapter *qdev);
 #define QL_DUMP_ALL(qdev) ql_dump_all(qdev)
 #else
 #define QL_DUMP_ALL(qdev)
index 10093f0c4c0f3d507514da946df9047b3d68d925..6bc5db7039201a1af0a835867e900ed59ab0af13 100644
@@ -740,8 +740,8 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
        int i;
 
        if (!mpi_coredump) {
-               netif_err(qdev, drv, qdev->ndev, "No memory available\n");
-               return -ENOMEM;
+               netif_err(qdev, drv, qdev->ndev, "No memory allocated\n");
+               return -EINVAL;
        }
 
        /* Try to get the spinlock, but dont worry if
index 2553cf4503b9f83996bb72bd291fb756558644f9..64f94098bc0239f99932bc6ed093e64b1cff0d21 100644
@@ -96,8 +96,10 @@ static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
 
 MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
 
-static int ql_wol(struct ql_adapter *qdev);
-static void qlge_set_multicast_list(struct net_device *ndev);
+static int ql_wol(struct ql_adapter *);
+static void qlge_set_multicast_list(struct net_device *);
+static int ql_adapter_down(struct ql_adapter *);
+static int ql_adapter_up(struct ql_adapter *);
 
 /* This hardware semaphore causes exclusive access to
  * resources shared between the NIC driver, MPI firmware,
@@ -1464,6 +1466,29 @@ static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
        }
 }
 
+/**
+ * ql_update_mac_hdr_len - helper routine to update the mac header length
+ * based on vlan tags if present
+ */
+static void ql_update_mac_hdr_len(struct ql_adapter *qdev,
+                                 struct ib_mac_iocb_rsp *ib_mac_rsp,
+                                 void *page, size_t *len)
+{
+       u16 *tags;
+
+       if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
+               return;
+       if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) {
+               tags = (u16 *)page;
+               /* Look for stacked vlan tags in ethertype field */
+               if (tags[6] == ETH_P_8021Q &&
+                   tags[8] == ETH_P_8021Q)
+                       *len += 2 * VLAN_HLEN;
+               else
+                       *len += VLAN_HLEN;
+       }
+}
+
 /* Process an inbound completion from an rx ring. */
 static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
                                        struct rx_ring *rx_ring,
@@ -1523,6 +1548,7 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
        void *addr;
        struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
        struct napi_struct *napi = &rx_ring->napi;
+       size_t hlen = ETH_HLEN;
 
        skb = netdev_alloc_skb(ndev, length);
        if (!skb) {
@@ -1540,25 +1566,28 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
                goto err_out;
        }
 
+       /* Update the MAC header length*/
+       ql_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen);
+
        /* The max framesize filter on this chip is set higher than
         * MTU since FCoE uses 2k frames.
         */
-       if (skb->len > ndev->mtu + ETH_HLEN) {
+       if (skb->len > ndev->mtu + hlen) {
                netif_err(qdev, drv, qdev->ndev,
                          "Segment too small, dropping.\n");
                rx_ring->rx_dropped++;
                goto err_out;
        }
-       memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
+       memcpy(skb_put(skb, hlen), addr, hlen);
        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
                     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
                     length);
        skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
-                               lbq_desc->p.pg_chunk.offset+ETH_HLEN,
-                               length-ETH_HLEN);
-       skb->len += length-ETH_HLEN;
-       skb->data_len += length-ETH_HLEN;
-       skb->truesize += length-ETH_HLEN;
+                               lbq_desc->p.pg_chunk.offset + hlen,
+                               length - hlen);
+       skb->len += length - hlen;
+       skb->data_len += length - hlen;
+       skb->truesize += length - hlen;
 
        rx_ring->rx_packets++;
        rx_ring->rx_bytes += skb->len;
@@ -1576,7 +1605,7 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
                                (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
                        /* Unfragmented ipv4 UDP frame. */
                        struct iphdr *iph =
-                               (struct iphdr *) ((u8 *)addr + ETH_HLEN);
+                               (struct iphdr *)((u8 *)addr + hlen);
                        if (!(iph->frag_off &
                                htons(IP_MF|IP_OFFSET))) {
                                skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1726,7 +1755,8 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
        struct bq_desc *sbq_desc;
        struct sk_buff *skb = NULL;
        u32 length = le32_to_cpu(ib_mac_rsp->data_len);
-       u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
+       u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
+       size_t hlen = ETH_HLEN;
 
        /*
         * Handle the header buffer if present.
@@ -1853,9 +1883,10 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
                        skb->data_len += length;
                        skb->truesize += length;
                        length -= length;
-                       __pskb_pull_tail(skb,
-                               (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
-                               VLAN_ETH_HLEN : ETH_HLEN);
+                       ql_update_mac_hdr_len(qdev, ib_mac_rsp,
+                                             lbq_desc->p.pg_chunk.va,
+                                             &hlen);
+                       __pskb_pull_tail(skb, hlen);
                }
        } else {
                /*
@@ -1910,8 +1941,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
                        length -= size;
                        i++;
                }
-               __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
-                               VLAN_ETH_HLEN : ETH_HLEN);
+               ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va,
+                                     &hlen);
+               __pskb_pull_tail(skb, hlen);
        }
        return skb;
 }
@@ -2003,7 +2035,7 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
        rx_ring->rx_packets++;
        rx_ring->rx_bytes += skb->len;
        skb_record_rx_queue(skb, rx_ring->cq_id);
-       if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
+       if (vlan_id != 0xffff)
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
        if (skb->ip_summed == CHECKSUM_UNNECESSARY)
                napi_gro_receive(&rx_ring->napi, skb);
@@ -2017,7 +2049,8 @@ static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
                                        struct ib_mac_iocb_rsp *ib_mac_rsp)
 {
        u32 length = le32_to_cpu(ib_mac_rsp->data_len);
-       u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
+       u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
+                       (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ?
                        ((le16_to_cpu(ib_mac_rsp->vlan_id) &
                        IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
 
@@ -2310,9 +2343,39 @@ static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
        }
 }
 
+/**
+ * qlge_update_hw_vlan_features - helper routine to reinitialize the adapter
+ * based on the features to enable/disable hardware vlan accel
+ */
+static int qlge_update_hw_vlan_features(struct net_device *ndev,
+                                       netdev_features_t features)
+{
+       struct ql_adapter *qdev = netdev_priv(ndev);
+       int status = 0;
+
+       status = ql_adapter_down(qdev);
+       if (status) {
+               netif_err(qdev, link, qdev->ndev,
+                         "Failed to bring down the adapter\n");
+               return status;
+       }
+
+       /* update the features with resent change */
+       ndev->features = features;
+
+       status = ql_adapter_up(qdev);
+       if (status) {
+               netif_err(qdev, link, qdev->ndev,
+                         "Failed to bring up the adapter\n");
+               return status;
+       }
+       return status;
+}
+
 static netdev_features_t qlge_fix_features(struct net_device *ndev,
        netdev_features_t features)
 {
+       int err;
        /*
         * Since there is no support for separate rx/tx vlan accel
         * enable/disable make sure tx flag is always in same state as rx.
@@ -2322,6 +2385,11 @@ static netdev_features_t qlge_fix_features(struct net_device *ndev,
        else
                features &= ~NETIF_F_HW_VLAN_CTAG_TX;
 
+       /* Update the behavior of vlan accel in the adapter */
+       err = qlge_update_hw_vlan_features(ndev, features);
+       if (err)
+               return err;
+
        return features;
 }
 
@@ -3704,8 +3772,12 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
        ql_write32(qdev, SYS, mask | value);
 
        /* Set the default queue, and VLAN behavior. */
-       value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
-       mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
+       value = NIC_RCV_CFG_DFQ;
+       mask = NIC_RCV_CFG_DFQ_MASK;
+       if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+               value |= NIC_RCV_CFG_RV;
+               mask |= (NIC_RCV_CFG_RV << 16);
+       }
        ql_write32(qdev, NIC_RCV_CFG, (mask | value));
 
        /* Set the MPI interrupt to enabled. */
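The companion mask is shifted into the upper 16 bits before the write, which suggests NIC_RCV_CFG follows the split-register convention where the top half acts as a per-bit write enable for the bottom half; the register layout itself is not part of this diff, so treat that reading, and the bit values below, as assumptions. A toy sketch of building such a write word:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical bit values, for illustration only. */
#define CFG_DFQ       0x00000001u   /* default-queue select           */
#define CFG_DFQ_MASK  0x00010000u   /* write enable for the DFQ bit   */
#define CFG_RV        0x00000002u   /* hardware VLAN stripping enable */

static uint32_t rcv_cfg_word(int vlan_rx_accel)
{
	uint32_t value = CFG_DFQ;
	uint32_t mask  = CFG_DFQ_MASK;

	if (vlan_rx_accel) {
		value |= CFG_RV;
		mask  |= CFG_RV << 16;   /* unlock the RV bit for this write */
	}
	return mask | value;
}

int main(void)
{
	printf("vlan rx accel on : 0x%08x\n", rcv_cfg_word(1));   /* 0x00030003 */
	printf("vlan rx accel off: 0x%08x\n", rcv_cfg_word(0));   /* 0x00010001 */
	return 0;
}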
@@ -4692,11 +4764,15 @@ static int qlge_probe(struct pci_dev *pdev,
 
        qdev = netdev_priv(ndev);
        SET_NETDEV_DEV(ndev, &pdev->dev);
-       ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
-               NETIF_F_TSO | NETIF_F_TSO_ECN |
-               NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_RXCSUM;
-       ndev->features = ndev->hw_features |
-               NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
+       ndev->hw_features = NETIF_F_SG |
+                           NETIF_F_IP_CSUM |
+                           NETIF_F_TSO |
+                           NETIF_F_TSO_ECN |
+                           NETIF_F_HW_VLAN_CTAG_TX |
+                           NETIF_F_HW_VLAN_CTAG_RX |
+                           NETIF_F_HW_VLAN_CTAG_FILTER |
+                           NETIF_F_RXCSUM;
+       ndev->features = ndev->hw_features;
        ndev->vlan_features = ndev->hw_features;
 
        if (test_bit(QL_DMA64, &qdev->flags))
index ff2bf8a4e24773429f02e1b66777e76bdd079134..7ad146080c3649585e5c4611c6ba79883bda6293 100644 (file)
@@ -1274,7 +1274,7 @@ void ql_mpi_reset_work(struct work_struct *work)
                return;
        }
 
-       if (!ql_core_dump(qdev, qdev->mpi_coredump)) {
+       if (qdev->mpi_coredump && !ql_core_dump(qdev, qdev->mpi_coredump)) {
                netif_err(qdev, drv, qdev->ndev, "Core is dumped!\n");
                qdev->core_is_dumped = 1;
                queue_delayed_work(qdev->workqueue,
index 5cd831ebfa83b0a95c472926a0105c521f7c352a..c8df52bac162ffb490c4380d3345aaf0d52bb333 100644 (file)
@@ -868,7 +868,7 @@ static void update_mac_address(struct net_device *ndev)
 static void read_mac_address(struct net_device *ndev, unsigned char *mac)
 {
        if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
-               memcpy(ndev->dev_addr, mac, 6);
+               memcpy(ndev->dev_addr, mac, ETH_ALEN);
        } else {
                ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24);
                ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF;
index 31d01284e333c80aeceb3d31083339593e0fabaf..b8235ee5d7d739ae879aaba810620740951af8ff 100644 (file)
 #define EFX_MEM_BAR 2
 
 /* TX */
-extern int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
-extern void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
-extern void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
-extern void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
-extern void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
-extern netdev_tx_t
-efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
-extern netdev_tx_t
-efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
-extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
-extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
-extern unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
+int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
+void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
+void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
+void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
+void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
+netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
+                               struct net_device *net_dev);
+netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
+void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
+int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
+unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
 extern unsigned int efx_piobuf_size;
 
 /* RX */
-extern void efx_rx_config_page_split(struct efx_nic *efx);
-extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
-extern void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
-extern void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
-extern void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
-extern void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
-extern void efx_rx_slow_fill(unsigned long context);
-extern void __efx_rx_packet(struct efx_channel *channel);
-extern void efx_rx_packet(struct efx_rx_queue *rx_queue,
-                         unsigned int index, unsigned int n_frags,
-                         unsigned int len, u16 flags);
+void efx_rx_config_page_split(struct efx_nic *efx);
+int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
+void efx_rx_slow_fill(unsigned long context);
+void __efx_rx_packet(struct efx_channel *channel);
+void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
+                  unsigned int n_frags, unsigned int len, u16 flags);
 static inline void efx_rx_flush_packet(struct efx_channel *channel)
 {
        if (channel->rx_pkt_n_frags)
                __efx_rx_packet(channel);
 }
-extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
+void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
 
 #define EFX_MAX_DMAQ_SIZE 4096UL
 #define EFX_DEFAULT_DMAQ_SIZE 1024UL
@@ -163,9 +161,9 @@ static inline s32 efx_filter_get_rx_ids(struct efx_nic *efx,
        return efx->type->filter_get_rx_ids(efx, priority, buf, size);
 }
 #ifdef CONFIG_RFS_ACCEL
-extern int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
-                         u16 rxq_index, u32 flow_id);
-extern bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota);
+int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
+                  u16 rxq_index, u32 flow_id);
+bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota);
 static inline void efx_filter_rfs_expire(struct efx_channel *channel)
 {
        if (channel->rfs_filters_added >= 60 &&
@@ -177,50 +175,48 @@ static inline void efx_filter_rfs_expire(struct efx_channel *channel)
 static inline void efx_filter_rfs_expire(struct efx_channel *channel) {}
 #define efx_filter_rfs_enabled() 0
 #endif
-extern bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec);
+bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec);
 
 /* Channels */
-extern int efx_channel_dummy_op_int(struct efx_channel *channel);
-extern void efx_channel_dummy_op_void(struct efx_channel *channel);
-extern int
-efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
+int efx_channel_dummy_op_int(struct efx_channel *channel);
+void efx_channel_dummy_op_void(struct efx_channel *channel);
+int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
 
 /* Ports */
-extern int efx_reconfigure_port(struct efx_nic *efx);
-extern int __efx_reconfigure_port(struct efx_nic *efx);
+int efx_reconfigure_port(struct efx_nic *efx);
+int __efx_reconfigure_port(struct efx_nic *efx);
 
 /* Ethtool support */
 extern const struct ethtool_ops efx_ethtool_ops;
 
 /* Reset handling */
-extern int efx_reset(struct efx_nic *efx, enum reset_type method);
-extern void efx_reset_down(struct efx_nic *efx, enum reset_type method);
-extern int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok);
-extern int efx_try_recovery(struct efx_nic *efx);
+int efx_reset(struct efx_nic *efx, enum reset_type method);
+void efx_reset_down(struct efx_nic *efx, enum reset_type method);
+int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok);
+int efx_try_recovery(struct efx_nic *efx);
 
 /* Global */
-extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
-extern int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
-                                  unsigned int rx_usecs, bool rx_adaptive,
-                                  bool rx_may_override_tx);
-extern void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
-                                  unsigned int *rx_usecs, bool *rx_adaptive);
+void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
+int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
+                           unsigned int rx_usecs, bool rx_adaptive,
+                           bool rx_may_override_tx);
+void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
+                           unsigned int *rx_usecs, bool *rx_adaptive);
 
 /* Dummy PHY ops for PHY drivers */
-extern int efx_port_dummy_op_int(struct efx_nic *efx);
-extern void efx_port_dummy_op_void(struct efx_nic *efx);
-
+int efx_port_dummy_op_int(struct efx_nic *efx);
+void efx_port_dummy_op_void(struct efx_nic *efx);
 
 /* MTD */
 #ifdef CONFIG_SFC_MTD
-extern int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
-                      size_t n_parts, size_t sizeof_part);
+int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
+               size_t n_parts, size_t sizeof_part);
 static inline int efx_mtd_probe(struct efx_nic *efx)
 {
        return efx->type->mtd_probe(efx);
 }
-extern void efx_mtd_rename(struct efx_nic *efx);
-extern void efx_mtd_remove(struct efx_nic *efx);
+void efx_mtd_rename(struct efx_nic *efx);
+void efx_mtd_remove(struct efx_nic *efx);
 #else
 static inline int efx_mtd_probe(struct efx_nic *efx) { return 0; }
 static inline void efx_mtd_rename(struct efx_nic *efx) {}
@@ -242,9 +238,9 @@ static inline void efx_schedule_channel_irq(struct efx_channel *channel)
        efx_schedule_channel(channel);
 }
 
-extern void efx_link_status_changed(struct efx_nic *efx);
-extern void efx_link_set_advertising(struct efx_nic *efx, u32);
-extern void efx_link_set_wanted_fc(struct efx_nic *efx, u8);
+void efx_link_status_changed(struct efx_nic *efx);
+void efx_link_set_advertising(struct efx_nic *efx, u32);
+void efx_link_set_wanted_fc(struct efx_nic *efx, u8);
 
 static inline void efx_device_detach_sync(struct efx_nic *efx)
 {
index 128d7cdf9eb207583afc36dc8706ef3503b3f5a5..c082562dbf4ee8d96388dc21e0b2da1803fe84f2 100644 (file)
 
 /* A reboot/assertion causes the MCDI status word to be set after the
  * command word is set or a REBOOT event is sent. If we notice a reboot
- * via these mechanisms then wait 20ms for the status word to be set.
+ * via these mechanisms then wait 250ms for the status word to be set.
  */
 #define MCDI_STATUS_DELAY_US           100
-#define MCDI_STATUS_DELAY_COUNT                200
+#define MCDI_STATUS_DELAY_COUNT                2500
 #define MCDI_STATUS_SLEEP_MS                                           \
        (MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000)
 
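The larger count keeps the derived sleep interval in step with the updated comment:

    MCDI_STATUS_SLEEP_MS = MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000
                         = 100 * 2500 / 1000 = 250 ms

whereas the previous values worked out to 100 * 200 / 1000 = 20 ms.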
@@ -800,9 +800,6 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
        } else {
                int count;
 
-               /* Nobody was waiting for an MCDI request, so trigger a reset */
-               efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
-
                /* Consume the status word since efx_mcdi_rpc_finish() won't */
                for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) {
                        if (efx_mcdi_poll_reboot(efx))
@@ -810,6 +807,9 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
                        udelay(MCDI_STATUS_DELAY_US);
                }
                mcdi->new_epoch = true;
+
+               /* Nobody was waiting for an MCDI request, so trigger a reset */
+               efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
        }
 
        spin_unlock(&mcdi->iface_lock);
index c34d0d4e10ee2e0d453b3f192acaed86c5d75301..656a3277c2b210e69ffd028d059ce10b809e8db8 100644 (file)
@@ -108,38 +108,35 @@ static inline struct efx_mcdi_mon *efx_mcdi_mon(struct efx_nic *efx)
 }
 #endif
 
-extern int efx_mcdi_init(struct efx_nic *efx);
-extern void efx_mcdi_fini(struct efx_nic *efx);
+int efx_mcdi_init(struct efx_nic *efx);
+void efx_mcdi_fini(struct efx_nic *efx);
 
-extern int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
-                       const efx_dword_t *inbuf, size_t inlen,
+int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const efx_dword_t *inbuf,
+                size_t inlen, efx_dword_t *outbuf, size_t outlen,
+                size_t *outlen_actual);
+
+int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
+                      const efx_dword_t *inbuf, size_t inlen);
+int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
                        efx_dword_t *outbuf, size_t outlen,
                        size_t *outlen_actual);
 
-extern int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
-                             const efx_dword_t *inbuf, size_t inlen);
-extern int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
-                              efx_dword_t *outbuf, size_t outlen,
-                              size_t *outlen_actual);
-
 typedef void efx_mcdi_async_completer(struct efx_nic *efx,
                                      unsigned long cookie, int rc,
                                      efx_dword_t *outbuf,
                                      size_t outlen_actual);
-extern int efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
-                             const efx_dword_t *inbuf, size_t inlen,
-                             size_t outlen,
-                             efx_mcdi_async_completer *complete,
-                             unsigned long cookie);
+int efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
+                      const efx_dword_t *inbuf, size_t inlen, size_t outlen,
+                      efx_mcdi_async_completer *complete,
+                      unsigned long cookie);
 
-extern int efx_mcdi_poll_reboot(struct efx_nic *efx);
-extern void efx_mcdi_mode_poll(struct efx_nic *efx);
-extern void efx_mcdi_mode_event(struct efx_nic *efx);
-extern void efx_mcdi_flush_async(struct efx_nic *efx);
+int efx_mcdi_poll_reboot(struct efx_nic *efx);
+void efx_mcdi_mode_poll(struct efx_nic *efx);
+void efx_mcdi_mode_event(struct efx_nic *efx);
+void efx_mcdi_flush_async(struct efx_nic *efx);
 
-extern void efx_mcdi_process_event(struct efx_channel *channel,
-                                  efx_qword_t *event);
-extern void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
+void efx_mcdi_process_event(struct efx_channel *channel, efx_qword_t *event);
+void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
 
 /* We expect that 16- and 32-bit fields in MCDI requests and responses
  * are appropriately aligned, but 64-bit fields are only
@@ -275,55 +272,54 @@ extern void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
 #define MCDI_EVENT_FIELD(_ev, _field)                  \
        EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field)
 
-extern void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len);
-extern int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
-                                 u16 *fw_subtype_list, u32 *capabilities);
-extern int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart,
-                            u32 dest_evq);
-extern int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out);
-extern int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
-                              size_t *size_out, size_t *erase_size_out,
-                              bool *protected_out);
-extern int efx_mcdi_nvram_test_all(struct efx_nic *efx);
-extern int efx_mcdi_handle_assertion(struct efx_nic *efx);
-extern void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
-extern int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx,
-                                        const u8 *mac, int *id_out);
-extern int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out);
-extern int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id);
-extern int efx_mcdi_wol_filter_reset(struct efx_nic *efx);
-extern int efx_mcdi_flush_rxqs(struct efx_nic *efx);
-extern int efx_mcdi_port_probe(struct efx_nic *efx);
-extern void efx_mcdi_port_remove(struct efx_nic *efx);
-extern int efx_mcdi_port_reconfigure(struct efx_nic *efx);
-extern int efx_mcdi_port_get_number(struct efx_nic *efx);
-extern u32 efx_mcdi_phy_get_caps(struct efx_nic *efx);
-extern void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev);
-extern int efx_mcdi_set_mac(struct efx_nic *efx);
+void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len);
+int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
+                          u16 *fw_subtype_list, u32 *capabilities);
+int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq);
+int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out);
+int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
+                       size_t *size_out, size_t *erase_size_out,
+                       bool *protected_out);
+int efx_mcdi_nvram_test_all(struct efx_nic *efx);
+int efx_mcdi_handle_assertion(struct efx_nic *efx);
+void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
+int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac,
+                                 int *id_out);
+int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out);
+int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id);
+int efx_mcdi_wol_filter_reset(struct efx_nic *efx);
+int efx_mcdi_flush_rxqs(struct efx_nic *efx);
+int efx_mcdi_port_probe(struct efx_nic *efx);
+void efx_mcdi_port_remove(struct efx_nic *efx);
+int efx_mcdi_port_reconfigure(struct efx_nic *efx);
+int efx_mcdi_port_get_number(struct efx_nic *efx);
+u32 efx_mcdi_phy_get_caps(struct efx_nic *efx);
+void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev);
+int efx_mcdi_set_mac(struct efx_nic *efx);
 #define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1))
-extern void efx_mcdi_mac_start_stats(struct efx_nic *efx);
-extern void efx_mcdi_mac_stop_stats(struct efx_nic *efx);
-extern bool efx_mcdi_mac_check_fault(struct efx_nic *efx);
-extern enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason);
-extern int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method);
-extern int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled);
+void efx_mcdi_mac_start_stats(struct efx_nic *efx);
+void efx_mcdi_mac_stop_stats(struct efx_nic *efx);
+bool efx_mcdi_mac_check_fault(struct efx_nic *efx);
+enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason);
+int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method);
+int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled);
 
 #ifdef CONFIG_SFC_MCDI_MON
-extern int efx_mcdi_mon_probe(struct efx_nic *efx);
-extern void efx_mcdi_mon_remove(struct efx_nic *efx);
+int efx_mcdi_mon_probe(struct efx_nic *efx);
+void efx_mcdi_mon_remove(struct efx_nic *efx);
 #else
 static inline int efx_mcdi_mon_probe(struct efx_nic *efx) { return 0; }
 static inline void efx_mcdi_mon_remove(struct efx_nic *efx) {}
 #endif
 
 #ifdef CONFIG_SFC_MTD
-extern int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start,
-                            size_t len, size_t *retlen, u8 *buffer);
-extern int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len);
-extern int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start,
-                             size_t len, size_t *retlen, const u8 *buffer);
-extern int efx_mcdi_mtd_sync(struct mtd_info *mtd);
-extern void efx_mcdi_mtd_rename(struct efx_mtd_partition *part);
+int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start, size_t len,
+                     size_t *retlen, u8 *buffer);
+int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len);
+int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start, size_t len,
+                      size_t *retlen, const u8 *buffer);
+int efx_mcdi_mtd_sync(struct mtd_info *mtd);
+void efx_mcdi_mtd_rename(struct efx_mtd_partition *part);
 #endif
 
 #endif /* EFX_MCDI_H */
index 16824fecc5ee5c652a34179aeba4130c93fbc5fb..4a2dc4c281b730fd3b415c4714657c8ead4bdad7 100644 (file)
@@ -20,7 +20,7 @@
 
 static inline unsigned efx_mdio_id_rev(u32 id) { return id & 0xf; }
 static inline unsigned efx_mdio_id_model(u32 id) { return (id >> 4) & 0x3f; }
-extern unsigned efx_mdio_id_oui(u32 id);
+unsigned efx_mdio_id_oui(u32 id);
 
 static inline int efx_mdio_read(struct efx_nic *efx, int devad, int addr)
 {
@@ -56,7 +56,7 @@ static inline bool efx_mdio_phyxgxs_lane_sync(struct efx_nic *efx)
        return sync;
 }
 
-extern const char *efx_mdio_mmd_name(int mmd);
+const char *efx_mdio_mmd_name(int mmd);
 
 /*
  * Reset a specific MMD and wait for reset to clear.
@@ -64,30 +64,29 @@ extern const char *efx_mdio_mmd_name(int mmd);
  *
  * This function will sleep
  */
-extern int efx_mdio_reset_mmd(struct efx_nic *efx, int mmd,
-                             int spins, int spintime);
+int efx_mdio_reset_mmd(struct efx_nic *efx, int mmd, int spins, int spintime);
 
 /* As efx_mdio_check_mmd but for multiple MMDs */
 int efx_mdio_check_mmds(struct efx_nic *efx, unsigned int mmd_mask);
 
 /* Check the link status of specified mmds in bit mask */
-extern bool efx_mdio_links_ok(struct efx_nic *efx, unsigned int mmd_mask);
+bool efx_mdio_links_ok(struct efx_nic *efx, unsigned int mmd_mask);
 
 /* Generic transmit disable support through PMAPMD */
-extern void efx_mdio_transmit_disable(struct efx_nic *efx);
+void efx_mdio_transmit_disable(struct efx_nic *efx);
 
 /* Generic part of reconfigure: set/clear loopback bits */
-extern void efx_mdio_phy_reconfigure(struct efx_nic *efx);
+void efx_mdio_phy_reconfigure(struct efx_nic *efx);
 
 /* Set the power state of the specified MMDs */
-extern void efx_mdio_set_mmds_lpower(struct efx_nic *efx,
-                                    int low_power, unsigned int mmd_mask);
+void efx_mdio_set_mmds_lpower(struct efx_nic *efx, int low_power,
+                             unsigned int mmd_mask);
 
 /* Set (some of) the PHY settings over MDIO */
-extern int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd);
+int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd);
 
 /* Push advertising flags and restart autonegotiation */
-extern void efx_mdio_an_reconfigure(struct efx_nic *efx);
+void efx_mdio_an_reconfigure(struct efx_nic *efx);
 
 /* Get pause parameters from AN if available (otherwise return
  * requested pause parameters)
@@ -95,8 +94,7 @@ extern void efx_mdio_an_reconfigure(struct efx_nic *efx);
 u8 efx_mdio_get_pause(struct efx_nic *efx);
 
 /* Wait for specified MMDs to exit reset within a timeout */
-extern int efx_mdio_wait_reset_mmds(struct efx_nic *efx,
-                                   unsigned int mmd_mask);
+int efx_mdio_wait_reset_mmds(struct efx_nic *efx, unsigned int mmd_mask);
 
 /* Set or clear flag, debouncing */
 static inline void
@@ -107,6 +105,6 @@ efx_mdio_set_flag(struct efx_nic *efx, int devad, int addr,
 }
 
 /* Liveness self-test for MDIO PHYs */
-extern int efx_mdio_test_alive(struct efx_nic *efx);
+int efx_mdio_test_alive(struct efx_nic *efx);
 
 #endif /* EFX_MDIO_10G_H */
index 609f06769245395da24220320e8944ee01c24aa7..08883c8edf0e3877a84cc12ca868d00cc5d00f24 100644 (file)
@@ -30,7 +30,7 @@ static inline int efx_nic_rev(struct efx_nic *efx)
        return efx->type->revision;
 }
 
-extern u32 efx_farch_fpga_ver(struct efx_nic *efx);
+u32 efx_farch_fpga_ver(struct efx_nic *efx);
 
 /* NIC has two interlinked PCI functions for the same port. */
 static inline bool efx_nic_is_dual_func(struct efx_nic *efx)
@@ -497,18 +497,18 @@ static inline unsigned int efx_vf_size(struct efx_nic *efx)
        return 1 << efx->vi_scale;
 }
 
-extern int efx_init_sriov(void);
-extern void efx_sriov_probe(struct efx_nic *efx);
-extern int efx_sriov_init(struct efx_nic *efx);
-extern void efx_sriov_mac_address_changed(struct efx_nic *efx);
-extern void efx_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event);
-extern void efx_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event);
-extern void efx_sriov_event(struct efx_channel *channel, efx_qword_t *event);
-extern void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq);
-extern void efx_sriov_flr(struct efx_nic *efx, unsigned flr);
-extern void efx_sriov_reset(struct efx_nic *efx);
-extern void efx_sriov_fini(struct efx_nic *efx);
-extern void efx_fini_sriov(void);
+int efx_init_sriov(void);
+void efx_sriov_probe(struct efx_nic *efx);
+int efx_sriov_init(struct efx_nic *efx);
+void efx_sriov_mac_address_changed(struct efx_nic *efx);
+void efx_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event);
+void efx_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event);
+void efx_sriov_event(struct efx_channel *channel, efx_qword_t *event);
+void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq);
+void efx_sriov_flr(struct efx_nic *efx, unsigned flr);
+void efx_sriov_reset(struct efx_nic *efx);
+void efx_sriov_fini(struct efx_nic *efx);
+void efx_fini_sriov(void);
 
 #else
 
@@ -534,22 +534,20 @@ static inline void efx_fini_sriov(void) {}
 
 #endif
 
-extern int efx_sriov_set_vf_mac(struct net_device *dev, int vf, u8 *mac);
-extern int efx_sriov_set_vf_vlan(struct net_device *dev, int vf,
-                                u16 vlan, u8 qos);
-extern int efx_sriov_get_vf_config(struct net_device *dev, int vf,
-                                  struct ifla_vf_info *ivf);
-extern int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf,
-                                    bool spoofchk);
+int efx_sriov_set_vf_mac(struct net_device *dev, int vf, u8 *mac);
+int efx_sriov_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos);
+int efx_sriov_get_vf_config(struct net_device *dev, int vf,
+                           struct ifla_vf_info *ivf);
+int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf,
+                             bool spoofchk);
 
 struct ethtool_ts_info;
-extern void efx_ptp_probe(struct efx_nic *efx);
-extern int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd);
-extern void efx_ptp_get_ts_info(struct efx_nic *efx,
-                               struct ethtool_ts_info *ts_info);
-extern bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
-extern int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
-extern void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
+void efx_ptp_probe(struct efx_nic *efx);
+int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd);
+void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info);
+bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
+int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
+void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
 
 extern const struct efx_nic_type falcon_a1_nic_type;
 extern const struct efx_nic_type falcon_b0_nic_type;
@@ -563,7 +561,7 @@ extern const struct efx_nic_type efx_hunt_a0_nic_type;
  **************************************************************************
  */
 
-extern int falcon_probe_board(struct efx_nic *efx, u16 revision_info);
+int falcon_probe_board(struct efx_nic *efx, u16 revision_info);
 
 /* TX data path */
 static inline int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
@@ -631,58 +629,58 @@ static inline void efx_nic_eventq_read_ack(struct efx_channel *channel)
 {
        channel->efx->type->ev_read_ack(channel);
 }
-extern void efx_nic_event_test_start(struct efx_channel *channel);
+void efx_nic_event_test_start(struct efx_channel *channel);
 
 /* Falcon/Siena queue operations */
-extern int efx_farch_tx_probe(struct efx_tx_queue *tx_queue);
-extern void efx_farch_tx_init(struct efx_tx_queue *tx_queue);
-extern void efx_farch_tx_fini(struct efx_tx_queue *tx_queue);
-extern void efx_farch_tx_remove(struct efx_tx_queue *tx_queue);
-extern void efx_farch_tx_write(struct efx_tx_queue *tx_queue);
-extern int efx_farch_rx_probe(struct efx_rx_queue *rx_queue);
-extern void efx_farch_rx_init(struct efx_rx_queue *rx_queue);
-extern void efx_farch_rx_fini(struct efx_rx_queue *rx_queue);
-extern void efx_farch_rx_remove(struct efx_rx_queue *rx_queue);
-extern void efx_farch_rx_write(struct efx_rx_queue *rx_queue);
-extern void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue);
-extern int efx_farch_ev_probe(struct efx_channel *channel);
-extern int efx_farch_ev_init(struct efx_channel *channel);
-extern void efx_farch_ev_fini(struct efx_channel *channel);
-extern void efx_farch_ev_remove(struct efx_channel *channel);
-extern int efx_farch_ev_process(struct efx_channel *channel, int quota);
-extern void efx_farch_ev_read_ack(struct efx_channel *channel);
-extern void efx_farch_ev_test_generate(struct efx_channel *channel);
+int efx_farch_tx_probe(struct efx_tx_queue *tx_queue);
+void efx_farch_tx_init(struct efx_tx_queue *tx_queue);
+void efx_farch_tx_fini(struct efx_tx_queue *tx_queue);
+void efx_farch_tx_remove(struct efx_tx_queue *tx_queue);
+void efx_farch_tx_write(struct efx_tx_queue *tx_queue);
+int efx_farch_rx_probe(struct efx_rx_queue *rx_queue);
+void efx_farch_rx_init(struct efx_rx_queue *rx_queue);
+void efx_farch_rx_fini(struct efx_rx_queue *rx_queue);
+void efx_farch_rx_remove(struct efx_rx_queue *rx_queue);
+void efx_farch_rx_write(struct efx_rx_queue *rx_queue);
+void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue);
+int efx_farch_ev_probe(struct efx_channel *channel);
+int efx_farch_ev_init(struct efx_channel *channel);
+void efx_farch_ev_fini(struct efx_channel *channel);
+void efx_farch_ev_remove(struct efx_channel *channel);
+int efx_farch_ev_process(struct efx_channel *channel, int quota);
+void efx_farch_ev_read_ack(struct efx_channel *channel);
+void efx_farch_ev_test_generate(struct efx_channel *channel);
 
 /* Falcon/Siena filter operations */
-extern int efx_farch_filter_table_probe(struct efx_nic *efx);
-extern void efx_farch_filter_table_restore(struct efx_nic *efx);
-extern void efx_farch_filter_table_remove(struct efx_nic *efx);
-extern void efx_farch_filter_update_rx_scatter(struct efx_nic *efx);
-extern s32 efx_farch_filter_insert(struct efx_nic *efx,
-                                  struct efx_filter_spec *spec, bool replace);
-extern int efx_farch_filter_remove_safe(struct efx_nic *efx,
-                                       enum efx_filter_priority priority,
-                                       u32 filter_id);
-extern int efx_farch_filter_get_safe(struct efx_nic *efx,
-                                    enum efx_filter_priority priority,
-                                    u32 filter_id, struct efx_filter_spec *);
-extern void efx_farch_filter_clear_rx(struct efx_nic *efx,
-                                     enum efx_filter_priority priority);
-extern u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
-                                         enum efx_filter_priority priority);
-extern u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx);
-extern s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
-                                      enum efx_filter_priority priority,
-                                      u32 *buf, u32 size);
+int efx_farch_filter_table_probe(struct efx_nic *efx);
+void efx_farch_filter_table_restore(struct efx_nic *efx);
+void efx_farch_filter_table_remove(struct efx_nic *efx);
+void efx_farch_filter_update_rx_scatter(struct efx_nic *efx);
+s32 efx_farch_filter_insert(struct efx_nic *efx, struct efx_filter_spec *spec,
+                           bool replace);
+int efx_farch_filter_remove_safe(struct efx_nic *efx,
+                                enum efx_filter_priority priority,
+                                u32 filter_id);
+int efx_farch_filter_get_safe(struct efx_nic *efx,
+                             enum efx_filter_priority priority, u32 filter_id,
+                             struct efx_filter_spec *);
+void efx_farch_filter_clear_rx(struct efx_nic *efx,
+                              enum efx_filter_priority priority);
+u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
+                                  enum efx_filter_priority priority);
+u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx);
+s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
+                               enum efx_filter_priority priority, u32 *buf,
+                               u32 size);
 #ifdef CONFIG_RFS_ACCEL
-extern s32 efx_farch_filter_rfs_insert(struct efx_nic *efx,
-                                      struct efx_filter_spec *spec);
-extern bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
-                                           unsigned int index);
+s32 efx_farch_filter_rfs_insert(struct efx_nic *efx,
+                               struct efx_filter_spec *spec);
+bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
+                                    unsigned int index);
 #endif
-extern void efx_farch_filter_sync_rx_mode(struct efx_nic *efx);
+void efx_farch_filter_sync_rx_mode(struct efx_nic *efx);
 
-extern bool efx_nic_event_present(struct efx_channel *channel);
+bool efx_nic_event_present(struct efx_channel *channel);
 
 /* Some statistics are computed as A - B where A and B each increase
  * linearly with some hardware counter(s) and the counters are read
@@ -703,17 +701,17 @@ static inline void efx_update_diff_stat(u64 *stat, u64 diff)
 }
 
 /* Interrupts */
-extern int efx_nic_init_interrupt(struct efx_nic *efx);
-extern void efx_nic_irq_test_start(struct efx_nic *efx);
-extern void efx_nic_fini_interrupt(struct efx_nic *efx);
+int efx_nic_init_interrupt(struct efx_nic *efx);
+void efx_nic_irq_test_start(struct efx_nic *efx);
+void efx_nic_fini_interrupt(struct efx_nic *efx);
 
 /* Falcon/Siena interrupts */
-extern void efx_farch_irq_enable_master(struct efx_nic *efx);
-extern void efx_farch_irq_test_generate(struct efx_nic *efx);
-extern void efx_farch_irq_disable_master(struct efx_nic *efx);
-extern irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id);
-extern irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id);
-extern irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx);
+void efx_farch_irq_enable_master(struct efx_nic *efx);
+void efx_farch_irq_test_generate(struct efx_nic *efx);
+void efx_farch_irq_disable_master(struct efx_nic *efx);
+irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id);
+irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id);
+irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx);
 
 static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel)
 {
@@ -725,21 +723,21 @@ static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx)
 }
 
 /* Global Resources */
-extern int efx_nic_flush_queues(struct efx_nic *efx);
-extern void siena_prepare_flush(struct efx_nic *efx);
-extern int efx_farch_fini_dmaq(struct efx_nic *efx);
-extern void siena_finish_flush(struct efx_nic *efx);
-extern void falcon_start_nic_stats(struct efx_nic *efx);
-extern void falcon_stop_nic_stats(struct efx_nic *efx);
-extern int falcon_reset_xaui(struct efx_nic *efx);
-extern void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw);
-extern void efx_farch_init_common(struct efx_nic *efx);
-extern void efx_ef10_handle_drain_event(struct efx_nic *efx);
+int efx_nic_flush_queues(struct efx_nic *efx);
+void siena_prepare_flush(struct efx_nic *efx);
+int efx_farch_fini_dmaq(struct efx_nic *efx);
+void siena_finish_flush(struct efx_nic *efx);
+void falcon_start_nic_stats(struct efx_nic *efx);
+void falcon_stop_nic_stats(struct efx_nic *efx);
+int falcon_reset_xaui(struct efx_nic *efx);
+void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw);
+void efx_farch_init_common(struct efx_nic *efx);
+void efx_ef10_handle_drain_event(struct efx_nic *efx);
 static inline void efx_nic_push_rx_indir_table(struct efx_nic *efx)
 {
        efx->type->rx_push_indir_table(efx);
 }
-extern void efx_farch_rx_push_indir_table(struct efx_nic *efx);
+void efx_farch_rx_push_indir_table(struct efx_nic *efx);
 
 int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
                         unsigned int len, gfp_t gfp_flags);
@@ -750,24 +748,22 @@ struct efx_farch_register_test {
        unsigned address;
        efx_oword_t mask;
 };
-extern int efx_farch_test_registers(struct efx_nic *efx,
-                                   const struct efx_farch_register_test *regs,
-                                   size_t n_regs);
+int efx_farch_test_registers(struct efx_nic *efx,
+                            const struct efx_farch_register_test *regs,
+                            size_t n_regs);
 
-extern size_t efx_nic_get_regs_len(struct efx_nic *efx);
-extern void efx_nic_get_regs(struct efx_nic *efx, void *buf);
+size_t efx_nic_get_regs_len(struct efx_nic *efx);
+void efx_nic_get_regs(struct efx_nic *efx, void *buf);
 
-extern size_t
-efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
-                      const unsigned long *mask, u8 *names);
-extern void
-efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
-                    const unsigned long *mask,
-                    u64 *stats, const void *dma_buf, bool accumulate);
+size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
+                             const unsigned long *mask, u8 *names);
+void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
+                         const unsigned long *mask, u64 *stats,
+                         const void *dma_buf, bool accumulate);
 
 #define EFX_MAX_FLUSH_TIME 5000
 
-extern void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq,
-                                    efx_qword_t *event);
+void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq,
+                             efx_qword_t *event);
 
 #endif /* EFX_NIC_H */
index 45eeb70751562651b3a361e7b61f4ea4469b381b..803bf445c08e22df119c8274816cb26cf0b51a2b 100644 (file)
@@ -15,7 +15,7 @@
  */
 extern const struct efx_phy_operations falcon_sfx7101_phy_ops;
 
-extern void tenxpress_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
+void tenxpress_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
 
 /****************************************************************************
  * AMCC/Quake QT202x PHYs
@@ -34,7 +34,7 @@ extern const struct efx_phy_operations falcon_qt202x_phy_ops;
 #define QUAKE_LED_TXLINK       (0)
 #define QUAKE_LED_RXLINK       (8)
 
-extern void falcon_qt202x_set_led(struct efx_nic *p, int led, int state);
+void falcon_qt202x_set_led(struct efx_nic *p, int led, int state);
 
 /****************************************************************************
 * Transwitch CX4 retimer
@@ -44,7 +44,7 @@ extern const struct efx_phy_operations falcon_txc_phy_ops;
 #define TXC_GPIO_DIR_INPUT     0
 #define TXC_GPIO_DIR_OUTPUT    1
 
-extern void falcon_txc_set_gpio_dir(struct efx_nic *efx, int pin, int dir);
-extern void falcon_txc_set_gpio_val(struct efx_nic *efx, int pin, int val);
+void falcon_txc_set_gpio_dir(struct efx_nic *efx, int pin, int dir);
+void falcon_txc_set_gpio_val(struct efx_nic *efx, int pin, int val);
 
 #endif
index 87698ae0bf75f7dab78ca093ea2c3f3b3f1774d5..a2f4a06ffa4e4d8a6cb6829b45b8397ea10d4283 100644 (file)
@@ -43,13 +43,12 @@ struct efx_self_tests {
        struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX + 1];
 };
 
-extern void efx_loopback_rx_packet(struct efx_nic *efx,
-                                  const char *buf_ptr, int pkt_len);
-extern int efx_selftest(struct efx_nic *efx,
-                       struct efx_self_tests *tests,
-                       unsigned flags);
-extern void efx_selftest_async_start(struct efx_nic *efx);
-extern void efx_selftest_async_cancel(struct efx_nic *efx);
-extern void efx_selftest_async_work(struct work_struct *data);
+void efx_loopback_rx_packet(struct efx_nic *efx, const char *buf_ptr,
+                           int pkt_len);
+int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
+                unsigned flags);
+void efx_selftest_async_start(struct efx_nic *efx);
+void efx_selftest_async_cancel(struct efx_nic *efx);
+void efx_selftest_async_work(struct work_struct *data);
 
 #endif /* EFX_SELFTEST_H */
index 770036bc2d87c9fde8335241a43b52a5df581a6f..513ed8b1ba582add37602ab4c93faaf462c7f3a3 100644 (file)
@@ -839,7 +839,7 @@ static int meth_probe(struct platform_device *pdev)
        dev->watchdog_timeo     = timeout;
        dev->irq                = MACE_ETHERNET_IRQ;
        dev->base_addr          = (unsigned long)&mace->eth;
-       memcpy(dev->dev_addr, o2meth_eaddr, 6);
+       memcpy(dev->dev_addr, o2meth_eaddr, ETH_ALEN);
 
        priv = netdev_priv(dev);
        spin_lock_init(&priv->meth_lock);
index 5fdbc2686eb3a2f6dda0a9f1f4339e641011ffb4..01f8459c321393342def894dbce875322eee3402 100644 (file)
@@ -2502,7 +2502,7 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
                SMSC_TRACE(pdata, probe,
                           "MAC Address is specified by configuration");
        } else if (is_valid_ether_addr(pdata->config.mac)) {
-               memcpy(dev->dev_addr, pdata->config.mac, 6);
+               memcpy(dev->dev_addr, pdata->config.mac, ETH_ALEN);
                SMSC_TRACE(pdata, probe,
                           "MAC Address specified by platform data");
        } else {
index 7eb8babed2cbe38f822aab596f2451d75112c9a4..fc94f202a43e40f24247019fb6c5906da8ac20be 100644 (file)
@@ -451,14 +451,14 @@ struct mac_device_info {
 struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr);
 struct mac_device_info *dwmac100_setup(void __iomem *ioaddr);
 
-extern void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
-                               unsigned int high, unsigned int low);
-extern void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
-                               unsigned int high, unsigned int low);
+void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
+                        unsigned int high, unsigned int low);
+void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
+                        unsigned int high, unsigned int low);
 
-extern void stmmac_set_mac(void __iomem *ioaddr, bool enable);
+void stmmac_set_mac(void __iomem *ioaddr, bool enable);
 
-extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
+void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
 extern const struct stmmac_ring_mode_ops ring_mode_ops;
 extern const struct stmmac_chain_mode_ops chain_mode_ops;
 
index 8e5662ce488bd80d2a0a7457c25fbf49ca523bb1..def266da55dbe617e8be83f7a3d630f8230b9c5c 100644 (file)
 #define DMA_STATUS_TI  0x00000001      /* Transmit Interrupt */
 #define DMA_CONTROL_FTF                0x00100000      /* Flush transmit FIFO */
 
-extern void dwmac_enable_dma_transmission(void __iomem *ioaddr);
-extern void dwmac_enable_dma_irq(void __iomem *ioaddr);
-extern void dwmac_disable_dma_irq(void __iomem *ioaddr);
-extern void dwmac_dma_start_tx(void __iomem *ioaddr);
-extern void dwmac_dma_stop_tx(void __iomem *ioaddr);
-extern void dwmac_dma_start_rx(void __iomem *ioaddr);
-extern void dwmac_dma_stop_rx(void __iomem *ioaddr);
-extern int dwmac_dma_interrupt(void __iomem *ioaddr,
-                              struct stmmac_extra_stats *x);
+void dwmac_enable_dma_transmission(void __iomem *ioaddr);
+void dwmac_enable_dma_irq(void __iomem *ioaddr);
+void dwmac_disable_dma_irq(void __iomem *ioaddr);
+void dwmac_dma_start_tx(void __iomem *ioaddr);
+void dwmac_dma_stop_tx(void __iomem *ioaddr);
+void dwmac_dma_start_rx(void __iomem *ioaddr);
+void dwmac_dma_stop_rx(void __iomem *ioaddr);
+int dwmac_dma_interrupt(void __iomem *ioaddr, struct stmmac_extra_stats *x);
 
 #endif /* __DWMAC_DMA_H__ */
index 48ec001566b5540421c5e5a25fe0b18db3c25e58..8607488cbcfcfaea7bc70510f0a3f601c5dade7d 100644 (file)
@@ -128,8 +128,8 @@ struct stmmac_counters {
        unsigned int mmc_rx_icmp_err_octets;
 };
 
-extern void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode);
-extern void dwmac_mmc_intr_all_mask(void __iomem *ioaddr);
-extern void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc);
+void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode);
+void dwmac_mmc_intr_all_mask(void __iomem *ioaddr);
+void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc);
 
 #endif /* __MMC_H__ */
index f16a9bdf45bb6738c7fca4063cf96307d6303462..22f89ffdfd95fc3e8ca530808e4d6d0dfdcfeaf2 100644 (file)
@@ -110,14 +110,14 @@ struct stmmac_priv {
 
 extern int phyaddr;
 
-extern int stmmac_mdio_unregister(struct net_device *ndev);
-extern int stmmac_mdio_register(struct net_device *ndev);
-extern void stmmac_set_ethtool_ops(struct net_device *netdev);
+int stmmac_mdio_unregister(struct net_device *ndev);
+int stmmac_mdio_register(struct net_device *ndev);
+void stmmac_set_ethtool_ops(struct net_device *netdev);
 extern const struct stmmac_desc_ops enh_desc_ops;
 extern const struct stmmac_desc_ops ndesc_ops;
 extern const struct stmmac_hwtimestamp stmmac_ptp;
-extern int stmmac_ptp_register(struct stmmac_priv *priv);
-extern void stmmac_ptp_unregister(struct stmmac_priv *priv);
+int stmmac_ptp_register(struct stmmac_priv *priv);
+void stmmac_ptp_unregister(struct stmmac_priv *priv);
 int stmmac_freeze(struct net_device *ndev);
 int stmmac_restore(struct net_device *ndev);
 int stmmac_resume(struct net_device *ndev);
index 759441b29e535b4c7b1c0f94e6f7520fc628fbdd..a72ecc42885d0ba28acb2d3c74e04678ee74e93a 100644 (file)
@@ -3354,7 +3354,7 @@ use_random_mac_addr:
 #if defined(CONFIG_SPARC)
        addr = of_get_property(cp->of_node, "local-mac-address", NULL);
        if (addr != NULL) {
-               memcpy(dev_addr, addr, 6);
+               memcpy(dev_addr, addr, ETH_ALEN);
                goto done;
        }
 #endif
index e62df2b81302bd32881ca22daef127e04985679e..a235bd9fd9804cce900f4eeb044837e995feb9eb 100644 (file)
@@ -2779,7 +2779,7 @@ static int gem_get_device_address(struct gem *gp)
                return -1;
 #endif
        }
-       memcpy(dev->dev_addr, addr, 6);
+       memcpy(dev->dev_addr, addr, ETH_ALEN);
 #else
        get_gem_mac_nonobp(gp->pdev, gp->dev->dev_addr);
 #endif
index e37b587b386048dd299ac6ad0af55dd47d3e199f..99043b74bf2baae3c43a40c7c3d21d0d5df9c6d9 100644 (file)
@@ -2675,10 +2675,10 @@ static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe)
 
                addr = of_get_property(dp, "local-mac-address", &len);
 
-               if (qfe_slot != -1 && addr && len == 6)
-                       memcpy(dev->dev_addr, addr, 6);
+               if (qfe_slot != -1 && addr && len == ETH_ALEN)
+                       memcpy(dev->dev_addr, addr, ETH_ALEN);
                else
-                       memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
+                       memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
        }
 
        hp = netdev_priv(dev);
@@ -3024,9 +3024,9 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
                    (addr = of_get_property(dp, "local-mac-address", &len))
                        != NULL &&
                    len == 6) {
-                       memcpy(dev->dev_addr, addr, 6);
+                       memcpy(dev->dev_addr, addr, ETH_ALEN);
                } else {
-                       memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
+                       memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
                }
 #else
                get_hme_mac_nonsparc(pdev, &dev->dev_addr[0]);
index b072f4dba033c1661bf4a341747b1d82d4874fc7..5695ae2411dea0f74718d3a87254aab4cf97ac42 100644 (file)
@@ -843,7 +843,7 @@ static int qec_ether_init(struct platform_device *op)
        if (!dev)
                return -ENOMEM;
 
-       memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
+       memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
 
        qe = netdev_priv(dev);
 
index e092edeab650a8f4dbae597b776a32725bf33ba4..148da9ae83666ce7cd2284c1cc75f18e9957f15f 100644 (file)
@@ -152,7 +152,7 @@ static struct platform_driver cpsw_phy_sel_driver = {
        .driver         = {
                .name   = "cpsw-phy-sel",
                .owner  = THIS_MODULE,
-               .of_match_table = of_match_ptr(cpsw_phy_sel_id_table),
+               .of_match_table = cpsw_phy_sel_id_table,
        },
 };
 
index 5efb37bf0681ff32203da5314aae517ac96c70b5..7290f11a937dae164c8fb3896475a7174769d77c 100644 (file)
@@ -2217,7 +2217,7 @@ static struct platform_driver cpsw_driver = {
                .name    = "cpsw",
                .owner   = THIS_MODULE,
                .pm      = &cpsw_pm_ops,
-               .of_match_table = of_match_ptr(cpsw_of_mtable),
+               .of_match_table = cpsw_of_mtable,
        },
        .probe = cpsw_probe,
        .remove = cpsw_remove,
index fe993cdd7e23b76ad13f09585c215d87c83a0e46..1a581ef7eee8fcbd2ed121d64a747e06e84c6113 100644 (file)
@@ -127,8 +127,8 @@ struct cpts {
 };
 
 #ifdef CONFIG_TI_CPTS
-extern void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb);
-extern void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb);
+void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb);
+void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb);
 #else
 static inline void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb)
 {
@@ -138,8 +138,7 @@ static inline void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb)
 }
 #endif
 
-extern int cpts_register(struct device *dev, struct cpts *cpts,
-                        u32 mult, u32 shift);
-extern void cpts_unregister(struct cpts *cpts);
+int cpts_register(struct device *dev, struct cpts *cpts, u32 mult, u32 shift);
+void cpts_unregister(struct cpts *cpts);
 
 #endif
index 67df09ea9d045da26420de1e9da09af58ec0edb8..fba1c489a91112dce8eaaf705bbf7513355bf734 100644 (file)
@@ -1853,7 +1853,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
        }
 
        /* MAC addr and PHY mask, RMII enable info from platform_data */
-       memcpy(priv->mac_addr, pdata->mac_addr, 6);
+       memcpy(priv->mac_addr, pdata->mac_addr, ETH_ALEN);
        priv->phy_id = pdata->phy_id;
        priv->rmii_en = pdata->rmii_en;
        priv->version = pdata->version;
index 13e6fff8ca23af28e4b1e2229846522082dd60c4..628b736e5ae776fcf00333bed8c355e4b518314e 100644 (file)
@@ -2230,7 +2230,7 @@ static void tile_net_dev_init(const char *name, const uint8_t *mac)
                nz_addr |= mac[i];
 
        if (nz_addr) {
-               memcpy(dev->dev_addr, mac, 6);
+               memcpy(dev->dev_addr, mac, ETH_ALEN);
                dev->addr_len = 6;
        } else {
                eth_hw_addr_random(dev);
index 309abb472aa2040e978fea46faff0cfd81c0bb43..8505196be9f52bd7a982ec9dcb91f850379cfd07 100644 (file)
@@ -359,27 +359,26 @@ static inline void *port_priv(struct gelic_port *port)
 }
 
 #ifdef CONFIG_PPC_EARLY_DEBUG_PS3GELIC
-extern void udbg_shutdown_ps3gelic(void);
+void udbg_shutdown_ps3gelic(void);
 #else
 static inline void udbg_shutdown_ps3gelic(void) {}
 #endif
 
-extern int gelic_card_set_irq_mask(struct gelic_card *card, u64 mask);
+int gelic_card_set_irq_mask(struct gelic_card *card, u64 mask);
 /* shared netdev ops */
-extern void gelic_card_up(struct gelic_card *card);
-extern void gelic_card_down(struct gelic_card *card);
-extern int gelic_net_open(struct net_device *netdev);
-extern int gelic_net_stop(struct net_device *netdev);
-extern int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev);
-extern void gelic_net_set_multi(struct net_device *netdev);
-extern void gelic_net_tx_timeout(struct net_device *netdev);
-extern int gelic_net_change_mtu(struct net_device *netdev, int new_mtu);
-extern int gelic_net_setup_netdev(struct net_device *netdev,
-                                 struct gelic_card *card);
+void gelic_card_up(struct gelic_card *card);
+void gelic_card_down(struct gelic_card *card);
+int gelic_net_open(struct net_device *netdev);
+int gelic_net_stop(struct net_device *netdev);
+int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev);
+void gelic_net_set_multi(struct net_device *netdev);
+void gelic_net_tx_timeout(struct net_device *netdev);
+int gelic_net_change_mtu(struct net_device *netdev, int new_mtu);
+int gelic_net_setup_netdev(struct net_device *netdev, struct gelic_card *card);
 
 /* shared ethtool ops */
-extern void gelic_net_get_drvinfo(struct net_device *netdev,
-                                 struct ethtool_drvinfo *info);
-extern void gelic_net_poll_controller(struct net_device *netdev);
+void gelic_net_get_drvinfo(struct net_device *netdev,
+                          struct ethtool_drvinfo *info);
+void gelic_net_poll_controller(struct net_device *netdev);
 
 #endif /* _GELIC_NET_H */
index f7e51b7d704960a2eb45ae63d4ed19f134d6d105..11f443d8e4ea9042b6eb6831b0c8d62612b8ad52 100644 (file)
@@ -320,7 +320,7 @@ struct gelic_eurus_cmd {
 #define GELIC_WL_PRIV_SET_PSK          (SIOCIWFIRSTPRIV + 0)
 #define GELIC_WL_PRIV_GET_PSK          (SIOCIWFIRSTPRIV + 1)
 
-extern int gelic_wl_driver_probe(struct gelic_card *card);
-extern int gelic_wl_driver_remove(struct gelic_card *card);
-extern void gelic_wl_interrupt(struct net_device *netdev, u64 status);
+int gelic_wl_driver_probe(struct gelic_card *card);
+int gelic_wl_driver_remove(struct gelic_card *card);
+void gelic_wl_interrupt(struct net_device *netdev, u64 status);
 #endif /* _GELIC_WIRELESS_H */
index 4ba2135474d1a118a7a86b579aae38b11eb2c56a..9b6af0845a1101deca4b2083412dd1764fe2cc5b 100644 (file)
@@ -29,8 +29,8 @@
 
 #include <linux/sungem_phy.h>
 
-extern int spider_net_stop(struct net_device *netdev);
-extern int spider_net_open(struct net_device *netdev);
+int spider_net_stop(struct net_device *netdev);
+int spider_net_open(struct net_device *netdev);
 
 extern const struct ethtool_ops spider_net_ethtool_ops;
 
index c8f088ab5fdfdbb6c9c757f5defd9c9b70588163..bdf697b184ae14246a4173d09833367ba312d7df 100644 (file)
@@ -32,7 +32,7 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #define DRV_NAME       "via-rhine"
-#define DRV_VERSION    "1.5.0"
+#define DRV_VERSION    "1.5.1"
 #define DRV_RELDATE    "2010-10-09"
 
 #include <linux/types.h>
@@ -1704,7 +1704,12 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
                cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
 
        if (unlikely(vlan_tx_tag_present(skb))) {
-               rp->tx_ring[entry].tx_status = cpu_to_le32((vlan_tx_tag_get(skb)) << 16);
+               u16 vid_pcp = vlan_tx_tag_get(skb);
+
+               /* drop CFI/DEI bit, register needs VID and PCP */
+               vid_pcp = (vid_pcp & VLAN_VID_MASK) |
+                         ((vid_pcp & VLAN_PRIO_MASK) >> 1);
+               rp->tx_ring[entry].tx_status = cpu_to_le32((vid_pcp) << 16);
                /* request tagging */
                rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
        }
index b88121f240ca609ea26f911508b829c8fbbdbdd1..0029148077a9805a288a42a9d8207a97ce6e8133 100644 (file)
@@ -297,6 +297,12 @@ static int temac_dma_bd_init(struct net_device *ndev)
                       lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
        lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);
 
+       /* Init descriptor indexes */
+       lp->tx_bd_ci = 0;
+       lp->tx_bd_next = 0;
+       lp->tx_bd_tail = 0;
+       lp->rx_bd_ci = 0;
+
        return 0;
 
 out:
index 80dd404178505b02d599ab788e7c6152086d1728..74234a51c851186c0c9bcfbc140b261724144e20 100644 (file)
@@ -1172,7 +1172,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
 
        if (mac_address)
                /* Set the MAC address. */
-               memcpy(ndev->dev_addr, mac_address, 6);
+               memcpy(ndev->dev_addr, mac_address, ETH_ALEN);
        else
                dev_warn(dev, "No MAC address found\n");
 
index a20ed1a98099f3d959317f68964bbd88639e3d8b..f8399359017405756f06641fa6c0405c5d5ea0a9 100644 (file)
@@ -453,7 +453,7 @@ static void directed_beacon(struct s_smc *smc)
         */
        * (char *) a = (char) ((long)DBEACON_INFO<<24L) ;
        a[1] = 0 ;
-       memcpy((char *)a+1,(char *) &smc->mib.m[MAC0].fddiMACUpstreamNbr,6) ;
+       memcpy((char *)a+1, (char *) &smc->mib.m[MAC0].fddiMACUpstreamNbr, ETH_ALEN);
 
        CHECK_NPP() ;
         /* set memory address reg for writes */
index 3ca308b282148832b534ae91f17f33991f6f6d16..bd1166bf8f61a55c0e3440d2762f1b1e0765cc7b 100644 (file)
@@ -469,20 +469,20 @@ struct s_smc {
 
 extern const struct fddi_addr fddi_broadcast;
 
-extern void all_selection_criteria(struct s_smc *smc);
-extern void card_stop(struct s_smc *smc);
-extern void init_board(struct s_smc *smc, u_char *mac_addr);
-extern int init_fplus(struct s_smc *smc);
-extern void init_plc(struct s_smc *smc);
-extern int init_smt(struct s_smc *smc, u_char * mac_addr);
-extern void mac1_irq(struct s_smc *smc, u_short stu, u_short stl);
-extern void mac2_irq(struct s_smc *smc, u_short code_s2u, u_short code_s2l);
-extern void mac3_irq(struct s_smc *smc, u_short code_s3u, u_short code_s3l);
-extern int pcm_status_twisted(struct s_smc *smc);
-extern void plc1_irq(struct s_smc *smc);
-extern void plc2_irq(struct s_smc *smc);
-extern void read_address(struct s_smc *smc, u_char * mac_addr);
-extern void timer_irq(struct s_smc *smc);
+void all_selection_criteria(struct s_smc *smc);
+void card_stop(struct s_smc *smc);
+void init_board(struct s_smc *smc, u_char *mac_addr);
+int init_fplus(struct s_smc *smc);
+void init_plc(struct s_smc *smc);
+int init_smt(struct s_smc *smc, u_char *mac_addr);
+void mac1_irq(struct s_smc *smc, u_short stu, u_short stl);
+void mac2_irq(struct s_smc *smc, u_short code_s2u, u_short code_s2l);
+void mac3_irq(struct s_smc *smc, u_short code_s3u, u_short code_s3l);
+int pcm_status_twisted(struct s_smc *smc);
+void plc1_irq(struct s_smc *smc);
+void plc2_irq(struct s_smc *smc);
+void read_address(struct s_smc *smc, u_char *mac_addr);
+void timer_irq(struct s_smc *smc);
 
 #endif /* _SCMECM_ */
 
index f5d7305a5784174f9868f045d186e9c90d2bbc20..713d303a06a9e1d2ea06e198a9682beb4bc54fc2 100644 (file)
@@ -436,7 +436,7 @@ static  int skfp_driver_init(struct net_device *dev)
        }
        read_address(smc, NULL);
        pr_debug("HW-Addr: %pMF\n", smc->hw.fddi_canon_addr.a);
-       memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, 6);
+       memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, ETH_ALEN);
 
        smt_reset_defaults(smc, 0);
 
@@ -503,7 +503,7 @@ static int skfp_open(struct net_device *dev)
         *               address.
         */
        read_address(smc, NULL);
-       memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, 6);
+       memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, ETH_ALEN);
 
        init_smt(smc, NULL);
        smt_online(smc, 1);
@@ -1213,7 +1213,7 @@ static void CheckSourceAddress(unsigned char *frame, unsigned char *hw_addr)
        if ((unsigned short) frame[1 + 10] != 0)
                return;
        SRBit = frame[1 + 6] & 0x01;
-       memcpy(&frame[1 + 6], hw_addr, 6);
+       memcpy(&frame[1 + 6], hw_addr, ETH_ALEN);
        frame[8] |= SRBit;
 }                              // CheckSourceAddress
 
index a974727dd9a2bb604746a20b3f4e65d22b9801db..636b65c66d49e6568b1e8535e66fac5b9a8dbd49 100644 (file)
@@ -445,7 +445,7 @@ static int ser12_open(struct net_device *dev)
        outb(0, FCR(dev->base_addr));  /* disable FIFOs */
        outb(0x0d, MCR(dev->base_addr));
        outb(0, IER(dev->base_addr));
-       if (request_irq(dev->irq, ser12_interrupt, IRQF_DISABLED | IRQF_SHARED,
+       if (request_irq(dev->irq, ser12_interrupt, IRQF_SHARED,
                        "baycom_ser_fdx", dev)) {
                release_region(dev->base_addr, SER12_EXTENT);
                return -EBUSY;
index e349d867449b2d744ff9550535317c93c069f07b..f9a8976195ba05f0fc07723969e5bb2136140a9b 100644 (file)
@@ -490,7 +490,7 @@ static int ser12_open(struct net_device *dev)
        outb(0, FCR(dev->base_addr));  /* disable FIFOs */
        outb(0x0d, MCR(dev->base_addr));
        outb(0, IER(dev->base_addr));
-       if (request_irq(dev->irq, ser12_interrupt, IRQF_DISABLED | IRQF_SHARED,
+       if (request_irq(dev->irq, ser12_interrupt, IRQF_SHARED,
                        "baycom_ser12", dev)) {
                release_region(dev->base_addr, SER12_EXTENT);       
                return -EBUSY;
index 6d5b1e2b12893271adb7fd7d4c373cde4823d151..f50b9c1c0639008b1e314099f4918e3eb0ed57db 100644 (file)
@@ -102,28 +102,29 @@ struct sir_driver {
 
 /* exported */
 
-extern int irda_register_dongle(struct dongle_driver *new);
-extern int irda_unregister_dongle(struct dongle_driver *drv);
+int irda_register_dongle(struct dongle_driver *new);
+int irda_unregister_dongle(struct dongle_driver *drv);
 
-extern struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *name);
-extern int sirdev_put_instance(struct sir_dev *self);
+struct sir_dev *sirdev_get_instance(const struct sir_driver *drv,
+                                   const char *name);
+int sirdev_put_instance(struct sir_dev *self);
 
-extern int sirdev_set_dongle(struct sir_dev *dev, IRDA_DONGLE type);
-extern void sirdev_write_complete(struct sir_dev *dev);
-extern int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count);
+int sirdev_set_dongle(struct sir_dev *dev, IRDA_DONGLE type);
+void sirdev_write_complete(struct sir_dev *dev);
+int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count);
 
 /* low level helpers for SIR device/dongle setup */
-extern int sirdev_raw_write(struct sir_dev *dev, const char *buf, int len);
-extern int sirdev_raw_read(struct sir_dev *dev, char *buf, int len);
-extern int sirdev_set_dtr_rts(struct sir_dev *dev, int dtr, int rts);
+int sirdev_raw_write(struct sir_dev *dev, const char *buf, int len);
+int sirdev_raw_read(struct sir_dev *dev, char *buf, int len);
+int sirdev_set_dtr_rts(struct sir_dev *dev, int dtr, int rts);
 
 /* not exported */
 
-extern int sirdev_get_dongle(struct sir_dev *self, IRDA_DONGLE type);
-extern int sirdev_put_dongle(struct sir_dev *self);
+int sirdev_get_dongle(struct sir_dev *self, IRDA_DONGLE type);
+int sirdev_put_dongle(struct sir_dev *self);
 
-extern void sirdev_enable_rx(struct sir_dev *dev);
-extern int sirdev_schedule_request(struct sir_dev *dev, int state, unsigned param);
+void sirdev_enable_rx(struct sir_dev *dev);
+int sirdev_schedule_request(struct sir_dev *dev, int state, unsigned param);
 
 /* inline helpers */
 
index ac22283aaf23213ab9bf9d931f76df602ccea09d..bc71947b1ec329f2eacd52b915b37ce56a4f16bb 100644 (file)
@@ -100,6 +100,45 @@ static void at803x_get_wol(struct phy_device *phydev,
                wol->wolopts |= WAKE_MAGIC;
 }
 
+static int at803x_suspend(struct phy_device *phydev)
+{
+       int value;
+       int wol_enabled;
+
+       mutex_lock(&phydev->lock);
+
+       value = phy_read(phydev, AT803X_INTR_ENABLE);
+       wol_enabled = value & AT803X_WOL_ENABLE;
+
+       value = phy_read(phydev, MII_BMCR);
+
+       if (wol_enabled)
+               value |= BMCR_ISOLATE;
+       else
+               value |= BMCR_PDOWN;
+
+       phy_write(phydev, MII_BMCR, value);
+
+       mutex_unlock(&phydev->lock);
+
+       return 0;
+}
+
+static int at803x_resume(struct phy_device *phydev)
+{
+       int value;
+
+       mutex_lock(&phydev->lock);
+
+       value = phy_read(phydev, MII_BMCR);
+       value &= ~(BMCR_PDOWN | BMCR_ISOLATE);
+       phy_write(phydev, MII_BMCR, value);
+
+       mutex_unlock(&phydev->lock);
+
+       return 0;
+}
+
 static int at803x_config_init(struct phy_device *phydev)
 {
        int val;
@@ -161,10 +200,12 @@ static struct phy_driver at803x_driver[] = {
        .config_init    = at803x_config_init,
        .set_wol        = at803x_set_wol,
        .get_wol        = at803x_get_wol,
+       .suspend        = at803x_suspend,
+       .resume         = at803x_resume,
        .features       = PHY_GBIT_FEATURES,
        .flags          = PHY_HAS_INTERRUPT,
-       .config_aneg    = &genphy_config_aneg,
-       .read_status    = &genphy_read_status,
+       .config_aneg    = genphy_config_aneg,
+       .read_status    = genphy_read_status,
        .driver         = {
                .owner = THIS_MODULE,
        },
@@ -176,10 +217,12 @@ static struct phy_driver at803x_driver[] = {
        .config_init    = at803x_config_init,
        .set_wol        = at803x_set_wol,
        .get_wol        = at803x_get_wol,
+       .suspend        = at803x_suspend,
+       .resume         = at803x_resume,
        .features       = PHY_GBIT_FEATURES,
        .flags          = PHY_HAS_INTERRUPT,
-       .config_aneg    = &genphy_config_aneg,
-       .read_status    = &genphy_read_status,
+       .config_aneg    = genphy_config_aneg,
+       .read_status    = genphy_read_status,
        .driver         = {
                .owner = THIS_MODULE,
        },
@@ -191,10 +234,12 @@ static struct phy_driver at803x_driver[] = {
        .config_init    = at803x_config_init,
        .set_wol        = at803x_set_wol,
        .get_wol        = at803x_get_wol,
+       .suspend        = at803x_suspend,
+       .resume         = at803x_resume,
        .features       = PHY_GBIT_FEATURES,
        .flags          = PHY_HAS_INTERRUPT,
-       .config_aneg    = &genphy_config_aneg,
-       .read_status    = &genphy_read_status,
+       .config_aneg    = genphy_config_aneg,
+       .read_status    = genphy_read_status,
        .driver         = {
                .owner = THIS_MODULE,
        },
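
The new at803x suspend/resume callbacks above keep the PHY able to see wake packets: with Wake-on-LAN armed (AT803X_WOL_ENABLE set in the interrupt-enable register) suspend only isolates the MII interface via BMCR_ISOLATE, otherwise it powers the PHY down with BMCR_PDOWN; resume clears both bits again. A standalone C model of that decision, using the standard MII bit values from <linux/mii.h> (redefined here so it compiles on its own):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define BMCR_ISOLATE 0x0400     /* electrically isolate the MII interface */
    #define BMCR_PDOWN   0x0800     /* power the PHY down */

    static uint16_t suspend_bmcr(uint16_t bmcr, bool wol_enabled)
    {
            /* keep the receiver powered when it must detect wake packets */
            return bmcr | (wol_enabled ? BMCR_ISOLATE : BMCR_PDOWN);
    }

    static uint16_t resume_bmcr(uint16_t bmcr)
    {
            return bmcr & ~(BMCR_PDOWN | BMCR_ISOLATE);
    }

    int main(void)
    {
            assert(suspend_bmcr(0x1000, true)  == (0x1000 | BMCR_ISOLATE));
            assert(suspend_bmcr(0x1000, false) == (0x1000 | BMCR_PDOWN));
            assert(resume_bmcr(0x1000 | BMCR_PDOWN | BMCR_ISOLATE) == 0x1000);
            return 0;
    }
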
index 2e91477362d4d70b15df3db59ad41dec990a3c57..2e3c778ea9bf6f0437f95759f12c902a76cf9e7c 100644 (file)
@@ -34,9 +34,9 @@
 #include <linux/marvell_phy.h>
 #include <linux/of.h>
 
-#include <asm/io.h>
+#include <linux/io.h>
 #include <asm/irq.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 
 #define MII_MARVELL_PHY_PAGE           22
 
index 1f7bef90b46757a6ce2485c9aefd5d197464b7b1..7b4ff35c8bf7dcb28b455fb3a578eacddb298900 100644 (file)
@@ -1002,7 +1002,7 @@ plip_rewrite_address(const struct net_device *dev, struct ethhdr *eth)
                /* Any address will do - we take the first */
                const struct in_ifaddr *ifa = in_dev->ifa_list;
                if (ifa) {
-                       memcpy(eth->h_source, dev->dev_addr, 6);
+                       memcpy(eth->h_source, dev->dev_addr, ETH_ALEN);
                        memset(eth->h_dest, 0xfc, 2);
                        memcpy(eth->h_dest+2, &ifa->ifa_address, 4);
                }
index a34d6bf5e43b5b29325215e358cca9ee3fc734ee..cc70ecfc70626789183e462c8b51d13f0c7fc8aa 100644 (file)
@@ -429,11 +429,13 @@ static void slip_write_wakeup(struct tty_struct *tty)
        if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev))
                return;
 
+       spin_lock(&sl->lock);
        if (sl->xleft <= 0)  {
                /* Now serial buffer is almost free & we can start
                 * transmission of another packet */
                sl->dev->stats.tx_packets++;
                clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+               spin_unlock(&sl->lock);
                sl_unlock(sl);
                return;
        }
@@ -441,6 +443,7 @@ static void slip_write_wakeup(struct tty_struct *tty)
        actual = tty->ops->write(tty, sl->xhead, sl->xleft);
        sl->xleft -= actual;
        sl->xhead += actual;
+       spin_unlock(&sl->lock);
 }
 
 static void sl_tx_timeout(struct net_device *dev)
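
The slip hunks above take sl->lock inside the tty write-wakeup callback, so the partial-transmit state (xleft, xhead) cannot change underneath it while another context queues or finishes a packet under the same lock. A rough userspace analogue of continuing a partially written buffer under a lock (all names are illustrative, not the driver's):

    #include <pthread.h>
    #include <string.h>
    #include <unistd.h>

    struct tx_state {
            pthread_mutex_t lock;
            const char *xhead;      /* next unsent byte */
            ssize_t xleft;          /* bytes still queued */
            int fd;
    };

    /* Called whenever the output channel can accept more data. */
    static void write_wakeup(struct tx_state *t)
    {
            pthread_mutex_lock(&t->lock);
            if (t->xleft <= 0) {            /* previous buffer fully sent */
                    pthread_mutex_unlock(&t->lock);
                    return;
            }
            ssize_t n = write(t->fd, t->xhead, (size_t)t->xleft);
            if (n > 0) {
                    t->xleft -= n;
                    t->xhead += n;
            }
            pthread_mutex_unlock(&t->lock);
    }

    int main(void)
    {
            static const char msg[] = "hello, wakeup\n";
            struct tx_state t = {
                    .lock = PTHREAD_MUTEX_INITIALIZER,
                    .xhead = msg,
                    .xleft = (ssize_t)strlen(msg),
                    .fd = STDOUT_FILENO,
            };

            while (t.xleft > 0)
                    write_wakeup(&t);
            return 0;
    }
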
index 8d5cac2d8e33bee6b2545d45e83d24f52f8ac4c3..df507e6dbb9c99d9a15f718128e008f06ba11cdb 100644 (file)
@@ -640,10 +640,10 @@ static void catc_set_multicast_list(struct net_device *netdev)
 {
        struct catc *catc = netdev_priv(netdev);
        struct netdev_hw_addr *ha;
-       u8 broadcast[6];
+       u8 broadcast[ETH_ALEN];
        u8 rx = RxEnable | RxPolarity | RxMultiCast;
 
-       memset(broadcast, 0xff, 6);
+       memset(broadcast, 0xff, ETH_ALEN);
        memset(catc->multicast, 0, 64);
 
        catc_multicast(broadcast, catc->multicast);
@@ -778,7 +778,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
        struct usb_device *usbdev = interface_to_usbdev(intf);
        struct net_device *netdev;
        struct catc *catc;
-       u8 broadcast[6];
+       u8 broadcast[ETH_ALEN];
        int i, pktsz;
 
        if (usb_set_interface(usbdev,
@@ -882,7 +882,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
                
                dev_dbg(dev, "Filling the multicast list.\n");
          
-               memset(broadcast, 0xff, 6);
+               memset(broadcast, 0xff, ETH_ALEN);
                catc_multicast(broadcast, catc->multicast);
                catc_multicast(netdev->dev_addr, catc->multicast);
                catc_write_mem(catc, 0xfa80, catc->multicast, 64);
index 2dbb9460349d3659fc738eabb35f01cb0022e049..c6867f926cffc18a981c7682e5493ae36924d988 100644 (file)
@@ -303,7 +303,7 @@ static void dm9601_set_multicast(struct net_device *net)
                rx_ctl |= 0x02;
        } else if (net->flags & IFF_ALLMULTI ||
                   netdev_mc_count(net) > DM_MAX_MCAST) {
-               rx_ctl |= 0x04;
+               rx_ctl |= 0x08;
        } else if (!netdev_mc_empty(net)) {
                struct netdev_hw_addr *ha;
 
index 6312332afeba283f192cfbf0b9ca07dfbb1e52ec..d03b6b6c64c0baaa925714b88847861a5313981e 100644 (file)
@@ -143,16 +143,22 @@ static const struct net_device_ops qmi_wwan_netdev_ops = {
        .ndo_validate_addr      = eth_validate_addr,
 };
 
-/* using a counter to merge subdriver requests with our own into a combined state */
+/* using a counter to merge subdriver requests with our own into a
+ * combined state
+ */
 static int qmi_wwan_manage_power(struct usbnet *dev, int on)
 {
        struct qmi_wwan_state *info = (void *)&dev->data;
        int rv = 0;
 
-       dev_dbg(&dev->intf->dev, "%s() pmcount=%d, on=%d\n", __func__, atomic_read(&info->pmcount), on);
+       dev_dbg(&dev->intf->dev, "%s() pmcount=%d, on=%d\n", __func__,
+               atomic_read(&info->pmcount), on);
 
-       if ((on && atomic_add_return(1, &info->pmcount) == 1) || (!on && atomic_dec_and_test(&info->pmcount))) {
-               /* need autopm_get/put here to ensure the usbcore sees the new value */
+       if ((on && atomic_add_return(1, &info->pmcount) == 1) ||
+           (!on && atomic_dec_and_test(&info->pmcount))) {
+               /* need autopm_get/put here to ensure the usbcore sees
+                * the new value
+                */
                rv = usb_autopm_get_interface(dev->intf);
                if (rv < 0)
                        goto err;
@@ -199,7 +205,8 @@ static int qmi_wwan_register_subdriver(struct usbnet *dev)
        atomic_set(&info->pmcount, 0);
 
        /* register subdriver */
-       subdriver = usb_cdc_wdm_register(info->control, &dev->status->desc, 4096, &qmi_wwan_cdc_wdm_manage_power);
+       subdriver = usb_cdc_wdm_register(info->control, &dev->status->desc,
+                                        4096, &qmi_wwan_cdc_wdm_manage_power);
        if (IS_ERR(subdriver)) {
                dev_err(&info->control->dev, "subdriver registration failed\n");
                rv = PTR_ERR(subdriver);
@@ -228,7 +235,8 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
        struct usb_driver *driver = driver_of(intf);
        struct qmi_wwan_state *info = (void *)&dev->data;
 
-       BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data) < sizeof(struct qmi_wwan_state)));
+       BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data) <
+                     sizeof(struct qmi_wwan_state)));
 
        /* set up initial state */
        info->control = intf;
@@ -250,7 +258,8 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
                                goto err;
                        }
                        if (h->bLength != sizeof(struct usb_cdc_header_desc)) {
-                               dev_dbg(&intf->dev, "CDC header len %u\n", h->bLength);
+                               dev_dbg(&intf->dev, "CDC header len %u\n",
+                                       h->bLength);
                                goto err;
                        }
                        break;
@@ -260,7 +269,8 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
                                goto err;
                        }
                        if (h->bLength != sizeof(struct usb_cdc_union_desc)) {
-                               dev_dbg(&intf->dev, "CDC union len %u\n", h->bLength);
+                               dev_dbg(&intf->dev, "CDC union len %u\n",
+                                       h->bLength);
                                goto err;
                        }
                        cdc_union = (struct usb_cdc_union_desc *)buf;
@@ -271,15 +281,15 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
                                goto err;
                        }
                        if (h->bLength != sizeof(struct usb_cdc_ether_desc)) {
-                               dev_dbg(&intf->dev, "CDC ether len %u\n",  h->bLength);
+                               dev_dbg(&intf->dev, "CDC ether len %u\n",
+                                       h->bLength);
                                goto err;
                        }
                        cdc_ether = (struct usb_cdc_ether_desc *)buf;
                        break;
                }
 
-               /*
-                * Remember which CDC functional descriptors we've seen.  Works
+               /* Remember which CDC functional descriptors we've seen.  Works
                 * for all types we care about, of which USB_CDC_ETHERNET_TYPE
                 * (0x0f) is the highest numbered
                 */
@@ -293,10 +303,14 @@ next_desc:
 
        /* Use separate control and data interfaces if we found a CDC Union */
        if (cdc_union) {
-               info->data = usb_ifnum_to_if(dev->udev, cdc_union->bSlaveInterface0);
-               if (desc->bInterfaceNumber != cdc_union->bMasterInterface0 || !info->data) {
-                       dev_err(&intf->dev, "bogus CDC Union: master=%u, slave=%u\n",
-                               cdc_union->bMasterInterface0, cdc_union->bSlaveInterface0);
+               info->data = usb_ifnum_to_if(dev->udev,
+                                            cdc_union->bSlaveInterface0);
+               if (desc->bInterfaceNumber != cdc_union->bMasterInterface0 ||
+                   !info->data) {
+                       dev_err(&intf->dev,
+                               "bogus CDC Union: master=%u, slave=%u\n",
+                               cdc_union->bMasterInterface0,
+                               cdc_union->bSlaveInterface0);
                        goto err;
                }
        }
@@ -374,8 +388,7 @@ static int qmi_wwan_suspend(struct usb_interface *intf, pm_message_t message)
        struct qmi_wwan_state *info = (void *)&dev->data;
        int ret;
 
-       /*
-        * Both usbnet_suspend() and subdriver->suspend() MUST return 0
+       /* Both usbnet_suspend() and subdriver->suspend() MUST return 0
         * in system sleep context, otherwise, the resume callback has
         * to recover device from previous suspend failure.
         */
@@ -383,7 +396,8 @@ static int qmi_wwan_suspend(struct usb_interface *intf, pm_message_t message)
        if (ret < 0)
                goto err;
 
-       if (intf == info->control && info->subdriver && info->subdriver->suspend)
+       if (intf == info->control && info->subdriver &&
+           info->subdriver->suspend)
                ret = info->subdriver->suspend(intf, message);
        if (ret < 0)
                usbnet_resume(intf);
@@ -396,7 +410,8 @@ static int qmi_wwan_resume(struct usb_interface *intf)
        struct usbnet *dev = usb_get_intfdata(intf);
        struct qmi_wwan_state *info = (void *)&dev->data;
        int ret = 0;
-       bool callsub = (intf == info->control && info->subdriver && info->subdriver->resume);
+       bool callsub = (intf == info->control && info->subdriver &&
+                       info->subdriver->resume);
 
        if (callsub)
                ret = info->subdriver->resume(intf);
@@ -714,7 +729,8 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x2357, 0x0201, 4)},    /* TP-LINK HSUPA Modem MA180 */
        {QMI_FIXED_INTF(0x2357, 0x9000, 4)},    /* TP-LINK MA260 */
        {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},    /* Telit LE920 */
-       {QMI_FIXED_INTF(0x1e2d, 0x12d1, 4)},    /* Cinterion PLxx */
+       {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)},    /* Telit LE920 */
+       {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)},    /* Cinterion PLxx */
 
        /* 4. Gobi 1000 devices */
        {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},    /* Acer Gobi Modem Device */
@@ -776,7 +792,8 @@ static const struct usb_device_id products[] = {
 };
 MODULE_DEVICE_TABLE(usb, products);
 
-static int qmi_wwan_probe(struct usb_interface *intf, const struct usb_device_id *prod)
+static int qmi_wwan_probe(struct usb_interface *intf,
+                         const struct usb_device_id *prod)
 {
        struct usb_device_id *id = (struct usb_device_id *)prod;
 
index 7b331e613e02aec22c3ac225e3a99c53c5f0f822..bf94e10a37c8e0121d783fc54c1b565b57a7ce21 100644 (file)
@@ -1241,7 +1241,9 @@ static int build_dma_sg(const struct sk_buff *skb, struct urb *urb)
        if (num_sgs == 1)
                return 0;
 
-       urb->sg = kmalloc(num_sgs * sizeof(struct scatterlist), GFP_ATOMIC);
+       /* reserve one for zero packet */
+       urb->sg = kmalloc((num_sgs + 1) * sizeof(struct scatterlist),
+                         GFP_ATOMIC);
        if (!urb->sg)
                return -ENOMEM;
 
@@ -1305,7 +1307,7 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
                if (build_dma_sg(skb, urb) < 0)
                        goto drop;
        }
-       entry->length = length = urb->transfer_buffer_length;
+       length = urb->transfer_buffer_length;
 
        /* don't assume the hardware handles USB_ZERO_PACKET
         * NOTE:  strictly conforming cdc-ether devices should expect
@@ -1317,15 +1319,18 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
        if (length % dev->maxpacket == 0) {
                if (!(info->flags & FLAG_SEND_ZLP)) {
                        if (!(info->flags & FLAG_MULTI_PACKET)) {
-                               urb->transfer_buffer_length++;
-                               if (skb_tailroom(skb)) {
+                               length++;
+                               if (skb_tailroom(skb) && !urb->num_sgs) {
                                        skb->data[skb->len] = 0;
                                        __skb_put(skb, 1);
-                               }
+                               } else if (urb->num_sgs)
+                                       sg_set_buf(&urb->sg[urb->num_sgs++],
+                                                       dev->padding_pkt, 1);
                        }
                } else
                        urb->transfer_flags |= URB_ZERO_PACKET;
        }
+       entry->length = urb->transfer_buffer_length = length;
 
        spin_lock_irqsave(&dev->txq.lock, flags);
        retval = usb_autopm_get_interface_async(dev->intf);
@@ -1509,6 +1514,7 @@ void usbnet_disconnect (struct usb_interface *intf)
 
        usb_kill_urb(dev->interrupt);
        usb_free_urb(dev->interrupt);
+       kfree(dev->padding_pkt);
 
        free_netdev(net);
 }
@@ -1679,9 +1685,16 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
        /* initialize max rx_qlen and tx_qlen */
        usbnet_update_max_qlen(dev);
 
+       if (dev->can_dma_sg && !(info->flags & FLAG_SEND_ZLP) &&
+               !(info->flags & FLAG_MULTI_PACKET)) {
+               dev->padding_pkt = kzalloc(1, GFP_KERNEL);
+               if (!dev->padding_pkt)
+                       goto out4;
+       }
+
        status = register_netdev (net);
        if (status)
-               goto out4;
+               goto out5;
        netif_info(dev, probe, dev->net,
                   "register '%s' at usb-%s-%s, %s, %pM\n",
                   udev->dev.driver->name,
@@ -1699,6 +1712,8 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
 
        return 0;
 
+out5:
+       kfree(dev->padding_pkt);
 out4:
        usb_free_urb(dev->interrupt);
 out3:
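
The usbnet hunks above handle scatter-gather capable devices that cannot send a zero-length packet: when the transfer length lands exactly on a multiple of the endpoint's max packet size, one extra byte has to go out to terminate the transfer, and since the data may already be mapped as a scatterlist that byte comes from a preallocated 1-byte padding buffer (dev->padding_pkt) chained as an extra sg entry; that is why build_dma_sg() now allocates num_sgs + 1 entries and why the final length is written back to the URB only after the adjustment. A standalone model of the length decision:

    #include <assert.h>
    #include <stdbool.h>
    #include <stddef.h>

    /* Bytes that must go on the wire for a payload of 'len' bytes on an
     * endpoint with 'maxpacket'-byte packets, when the device cannot use a
     * zero-length packet to mark the end of a maxpacket-aligned transfer.
     */
    static size_t padded_tx_len(size_t len, size_t maxpacket, bool can_send_zlp)
    {
            if (!can_send_zlp && len % maxpacket == 0)
                    return len + 1;   /* one padding byte forces a short packet */
            return len;
    }

    int main(void)
    {
            assert(padded_tx_len(512, 512, false) == 513);
            assert(padded_tx_len(512, 512, true)  == 512);
            assert(padded_tx_len(500, 512, false) == 500);
            return 0;
    }
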
index a03f358fd58b9a44fc480ea33c67adf28e3cf0ef..12040a35d95d17223e5200b0a2716e288f31163d 100644 (file)
@@ -410,9 +410,9 @@ int
 vmxnet3_create_queues(struct vmxnet3_adapter *adapter,
                      u32 tx_ring_size, u32 rx_ring_size, u32 rx_ring2_size);
 
-extern void vmxnet3_set_ethtool_ops(struct net_device *netdev);
+void vmxnet3_set_ethtool_ops(struct net_device *netdev);
 
-extern struct rtnl_link_stats64 *
+struct rtnl_link_stats64 *
 vmxnet3_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats);
 
 extern char vmxnet3_driver_name[];
index d1292fe746bc2eb3962a57df299e49f7f9bc2d50..da8479479d01cfa9a01b17317ec24f2f4526c592 100644 (file)
@@ -952,8 +952,7 @@ void vxlan_sock_release(struct vxlan_sock *vs)
 
        spin_lock(&vn->sock_lock);
        hlist_del_rcu(&vs->hlist);
-       smp_wmb();
-       vs->sock->sk->sk_user_data = NULL;
+       rcu_assign_sk_user_data(vs->sock->sk, NULL);
        vxlan_notify_del_rx_port(sk);
        spin_unlock(&vn->sock_lock);
 
@@ -1048,8 +1047,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
 
        port = inet_sk(sk)->inet_sport;
 
-       smp_read_barrier_depends();
-       vs = (struct vxlan_sock *)sk->sk_user_data;
+       vs = rcu_dereference_sk_user_data(sk);
        if (!vs)
                goto drop;
 
@@ -2089,7 +2087,7 @@ static void vxlan_setup(struct net_device *dev)
        vxlan->age_timer.function = vxlan_cleanup;
        vxlan->age_timer.data = (unsigned long) vxlan;
 
-       inet_get_local_port_range(&low, &high);
+       inet_get_local_port_range(dev_net(dev), &low, &high);
        vxlan->port_min = low;
        vxlan->port_max = high;
        vxlan->dst_port = htons(vxlan_port);
@@ -2302,8 +2300,7 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
        atomic_set(&vs->refcnt, 1);
        vs->rcv = rcv;
        vs->data = data;
-       smp_wmb();
-       vs->sock->sk->sk_user_data = vs;
+       rcu_assign_sk_user_data(vs->sock->sk, vs);
 
        spin_lock(&vn->sock_lock);
        hlist_add_head_rcu(&vs->hlist, vs_head(net, port));
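
The vxlan hunks above drop the hand-rolled smp_wmb()/smp_read_barrier_depends() pairs around sk->sk_user_data in favour of rcu_assign_sk_user_data()/rcu_dereference_sk_user_data(), which carry the publish/consume ordering (and the sparse RCU annotations) themselves. A rough userspace analogue of the same publish-then-dereference pattern, using C11 atomics in place of the kernel's RCU helpers:

    #include <assert.h>
    #include <stdatomic.h>
    #include <stddef.h>

    struct vxlan_like {
            int port;
    };

    /* the "sk_user_data" slot: written by the control path, read by receivers */
    static _Atomic(struct vxlan_like *) user_data;

    static void publish(struct vxlan_like *vs)
    {
            /* release: all initialisation of *vs is visible before the pointer */
            atomic_store_explicit(&user_data, vs, memory_order_release);
    }

    static struct vxlan_like *lookup(void)
    {
            /* acquire pairs with the release above */
            return atomic_load_explicit(&user_data, memory_order_acquire);
    }

    int main(void)
    {
            static struct vxlan_like vs = { .port = 4789 };

            publish(&vs);
            struct vxlan_like *p = lookup();
            assert(p && p->port == 4789);
            return 0;
    }
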
index 8f0fc2e57e2be06fce081c6f835bb0ea72243859..f57ee67836aea02fc58ff782e38a1b8e3302c94d 100644 (file)
@@ -41,6 +41,6 @@ struct x25_asy {
 
 #define X25_ASY_MAGIC 0x5303
 
-extern int x25_asy_init(struct net_device *dev);
+int x25_asy_init(struct net_device *dev);
 
 #endif /* _LINUX_X25_ASY.H */
index f29d554fc07d4f893c0857593f0b108fa9b4a614..2416a9d60bd69e4781efcede38e93af611264e00 100644 (file)
@@ -395,20 +395,19 @@ struct z8530_dev
 extern u8 z8530_dead_port[];
 extern u8 z8530_hdlc_kilostream_85230[];
 extern u8 z8530_hdlc_kilostream[];
-extern irqreturn_t z8530_interrupt(int, void *);
-extern void z8530_describe(struct z8530_dev *, char *mapping, unsigned long io);
-extern int z8530_init(struct z8530_dev *);
-extern int z8530_shutdown(struct z8530_dev *);
-extern int z8530_sync_open(struct net_device *, struct z8530_channel *);
-extern int z8530_sync_close(struct net_device *, struct z8530_channel *);
-extern int z8530_sync_dma_open(struct net_device *, struct z8530_channel *);
-extern int z8530_sync_dma_close(struct net_device *, struct z8530_channel *);
-extern int z8530_sync_txdma_open(struct net_device *, struct z8530_channel *);
-extern int z8530_sync_txdma_close(struct net_device *, struct z8530_channel *);
-extern int z8530_channel_load(struct z8530_channel *, u8 *);
-extern netdev_tx_t z8530_queue_xmit(struct z8530_channel *c,
-                                         struct sk_buff *skb);
-extern void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb);
+irqreturn_t z8530_interrupt(int, void *);
+void z8530_describe(struct z8530_dev *, char *mapping, unsigned long io);
+int z8530_init(struct z8530_dev *);
+int z8530_shutdown(struct z8530_dev *);
+int z8530_sync_open(struct net_device *, struct z8530_channel *);
+int z8530_sync_close(struct net_device *, struct z8530_channel *);
+int z8530_sync_dma_open(struct net_device *, struct z8530_channel *);
+int z8530_sync_dma_close(struct net_device *, struct z8530_channel *);
+int z8530_sync_txdma_open(struct net_device *, struct z8530_channel *);
+int z8530_sync_txdma_close(struct net_device *, struct z8530_channel *);
+int z8530_channel_load(struct z8530_channel *, u8 *);
+netdev_tx_t z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb);
+void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb);
 
 
 /*
index 9f1e947f3557823bc228549a2fd9b0636fed55b2..649ecad6844c73baa58a9e8b0e1850b9fdb88b36 100644 (file)
@@ -256,21 +256,20 @@ void i2400mu_init(struct i2400mu *i2400mu)
        i2400mu->rx_size_auto_shrink = 1;
 }
 
-extern int i2400mu_notification_setup(struct i2400mu *);
-extern void i2400mu_notification_release(struct i2400mu *);
+int i2400mu_notification_setup(struct i2400mu *);
+void i2400mu_notification_release(struct i2400mu *);
 
-extern int i2400mu_rx_setup(struct i2400mu *);
-extern void i2400mu_rx_release(struct i2400mu *);
-extern void i2400mu_rx_kick(struct i2400mu *);
+int i2400mu_rx_setup(struct i2400mu *);
+void i2400mu_rx_release(struct i2400mu *);
+void i2400mu_rx_kick(struct i2400mu *);
 
-extern int i2400mu_tx_setup(struct i2400mu *);
-extern void i2400mu_tx_release(struct i2400mu *);
-extern void i2400mu_bus_tx_kick(struct i2400m *);
+int i2400mu_tx_setup(struct i2400mu *);
+void i2400mu_tx_release(struct i2400mu *);
+void i2400mu_bus_tx_kick(struct i2400m *);
 
-extern ssize_t i2400mu_bus_bm_cmd_send(struct i2400m *,
-                                      const struct i2400m_bootrom_header *,
-                                      size_t, int);
-extern ssize_t i2400mu_bus_bm_wait_for_ack(struct i2400m *,
-                                          struct i2400m_bootrom_header *,
-                                          size_t);
+ssize_t i2400mu_bus_bm_cmd_send(struct i2400m *,
+                               const struct i2400m_bootrom_header *, size_t,
+                               int);
+ssize_t i2400mu_bus_bm_wait_for_ack(struct i2400m *,
+                                   struct i2400m_bootrom_header *, size_t);
 #endif /* #ifndef __I2400M_USB_H__ */
index 79c6505b5c2030c0f3b0813b97aef5fdc8780278..5a34e72bab9afb42d670f7ea72286c9e191a31e2 100644 (file)
@@ -710,18 +710,18 @@ enum i2400m_bri {
        I2400M_BRI_MAC_REINIT = 1 << 3,
 };
 
-extern void i2400m_bm_cmd_prepare(struct i2400m_bootrom_header *);
-extern int i2400m_dev_bootstrap(struct i2400m *, enum i2400m_bri);
-extern int i2400m_read_mac_addr(struct i2400m *);
-extern int i2400m_bootrom_init(struct i2400m *, enum i2400m_bri);
-extern int i2400m_is_boot_barker(struct i2400m *, const void *, size_t);
+void i2400m_bm_cmd_prepare(struct i2400m_bootrom_header *);
+int i2400m_dev_bootstrap(struct i2400m *, enum i2400m_bri);
+int i2400m_read_mac_addr(struct i2400m *);
+int i2400m_bootrom_init(struct i2400m *, enum i2400m_bri);
+int i2400m_is_boot_barker(struct i2400m *, const void *, size_t);
 static inline
 int i2400m_is_d2h_barker(const void *buf)
 {
        const __le32 *barker = buf;
        return le32_to_cpu(*barker) == I2400M_D2H_MSG_BARKER;
 }
-extern void i2400m_unknown_barker(struct i2400m *, const void *, size_t);
+void i2400m_unknown_barker(struct i2400m *, const void *, size_t);
 
 /* Make/grok boot-rom header commands */
 
@@ -789,32 +789,31 @@ unsigned i2400m_brh_get_signature(const struct i2400m_bootrom_header *hdr)
 /*
  * Driver / device setup and internal functions
  */
-extern void i2400m_init(struct i2400m *);
-extern int i2400m_reset(struct i2400m *, enum i2400m_reset_type);
-extern void i2400m_netdev_setup(struct net_device *net_dev);
-extern int i2400m_sysfs_setup(struct device_driver *);
-extern void i2400m_sysfs_release(struct device_driver *);
-extern int i2400m_tx_setup(struct i2400m *);
-extern void i2400m_wake_tx_work(struct work_struct *);
-extern void i2400m_tx_release(struct i2400m *);
-
-extern int i2400m_rx_setup(struct i2400m *);
-extern void i2400m_rx_release(struct i2400m *);
-
-extern void i2400m_fw_cache(struct i2400m *);
-extern void i2400m_fw_uncache(struct i2400m *);
-
-extern void i2400m_net_rx(struct i2400m *, struct sk_buff *, unsigned,
-                         const void *, int);
-extern void i2400m_net_erx(struct i2400m *, struct sk_buff *,
-                          enum i2400m_cs);
-extern void i2400m_net_wake_stop(struct i2400m *);
+void i2400m_init(struct i2400m *);
+int i2400m_reset(struct i2400m *, enum i2400m_reset_type);
+void i2400m_netdev_setup(struct net_device *net_dev);
+int i2400m_sysfs_setup(struct device_driver *);
+void i2400m_sysfs_release(struct device_driver *);
+int i2400m_tx_setup(struct i2400m *);
+void i2400m_wake_tx_work(struct work_struct *);
+void i2400m_tx_release(struct i2400m *);
+
+int i2400m_rx_setup(struct i2400m *);
+void i2400m_rx_release(struct i2400m *);
+
+void i2400m_fw_cache(struct i2400m *);
+void i2400m_fw_uncache(struct i2400m *);
+
+void i2400m_net_rx(struct i2400m *, struct sk_buff *, unsigned, const void *,
+                  int);
+void i2400m_net_erx(struct i2400m *, struct sk_buff *, enum i2400m_cs);
+void i2400m_net_wake_stop(struct i2400m *);
 enum i2400m_pt;
-extern int i2400m_tx(struct i2400m *, const void *, size_t, enum i2400m_pt);
+int i2400m_tx(struct i2400m *, const void *, size_t, enum i2400m_pt);
 
 #ifdef CONFIG_DEBUG_FS
-extern int i2400m_debugfs_add(struct i2400m *);
-extern void i2400m_debugfs_rm(struct i2400m *);
+int i2400m_debugfs_add(struct i2400m *);
+void i2400m_debugfs_rm(struct i2400m *);
 #else
 static inline int i2400m_debugfs_add(struct i2400m *i2400m)
 {
@@ -824,8 +823,8 @@ static inline void i2400m_debugfs_rm(struct i2400m *i2400m) {}
 #endif
 
 /* Initialize/shutdown the device */
-extern int i2400m_dev_initialize(struct i2400m *);
-extern void i2400m_dev_shutdown(struct i2400m *);
+int i2400m_dev_initialize(struct i2400m *);
+void i2400m_dev_shutdown(struct i2400m *);
 
 extern struct attribute_group i2400m_dev_attr_group;
 
@@ -873,21 +872,21 @@ void i2400m_put(struct i2400m *i2400m)
        dev_put(i2400m->wimax_dev.net_dev);
 }
 
-extern int i2400m_dev_reset_handle(struct i2400m *, const char *);
-extern int i2400m_pre_reset(struct i2400m *);
-extern int i2400m_post_reset(struct i2400m *);
-extern void i2400m_error_recovery(struct i2400m *);
+int i2400m_dev_reset_handle(struct i2400m *, const char *);
+int i2400m_pre_reset(struct i2400m *);
+int i2400m_post_reset(struct i2400m *);
+void i2400m_error_recovery(struct i2400m *);
 
 /*
  * _setup()/_release() are called by the probe/disconnect functions of
  * the bus-specific drivers.
  */
-extern int i2400m_setup(struct i2400m *, enum i2400m_bri bm_flags);
-extern void i2400m_release(struct i2400m *);
+int i2400m_setup(struct i2400m *, enum i2400m_bri bm_flags);
+void i2400m_release(struct i2400m *);
 
-extern int i2400m_rx(struct i2400m *, struct sk_buff *);
-extern struct i2400m_msg_hdr *i2400m_tx_msg_get(struct i2400m *, size_t *);
-extern void i2400m_tx_msg_sent(struct i2400m *);
+int i2400m_rx(struct i2400m *, struct sk_buff *);
+struct i2400m_msg_hdr *i2400m_tx_msg_get(struct i2400m *, size_t *);
+void i2400m_tx_msg_sent(struct i2400m *);
 
 
 /*
@@ -900,20 +899,19 @@ struct device *i2400m_dev(struct i2400m *i2400m)
        return i2400m->wimax_dev.net_dev->dev.parent;
 }
 
-extern int i2400m_msg_check_status(const struct i2400m_l3l4_hdr *,
-                                  char *, size_t);
-extern int i2400m_msg_size_check(struct i2400m *,
-                                const struct i2400m_l3l4_hdr *, size_t);
-extern struct sk_buff *i2400m_msg_to_dev(struct i2400m *, const void *, size_t);
-extern void i2400m_msg_to_dev_cancel_wait(struct i2400m *, int);
-extern void i2400m_report_hook(struct i2400m *,
-                              const struct i2400m_l3l4_hdr *, size_t);
-extern void i2400m_report_hook_work(struct work_struct *);
-extern int i2400m_cmd_enter_powersave(struct i2400m *);
-extern int i2400m_cmd_exit_idle(struct i2400m *);
-extern struct sk_buff *i2400m_get_device_info(struct i2400m *);
-extern int i2400m_firmware_check(struct i2400m *);
-extern int i2400m_set_idle_timeout(struct i2400m *, unsigned);
+int i2400m_msg_check_status(const struct i2400m_l3l4_hdr *, char *, size_t);
+int i2400m_msg_size_check(struct i2400m *, const struct i2400m_l3l4_hdr *,
+                         size_t);
+struct sk_buff *i2400m_msg_to_dev(struct i2400m *, const void *, size_t);
+void i2400m_msg_to_dev_cancel_wait(struct i2400m *, int);
+void i2400m_report_hook(struct i2400m *, const struct i2400m_l3l4_hdr *,
+                       size_t);
+void i2400m_report_hook_work(struct work_struct *);
+int i2400m_cmd_enter_powersave(struct i2400m *);
+int i2400m_cmd_exit_idle(struct i2400m *);
+struct sk_buff *i2400m_get_device_info(struct i2400m *);
+int i2400m_firmware_check(struct i2400m *);
+int i2400m_set_idle_timeout(struct i2400m *, unsigned);
 
 static inline
 struct usb_endpoint_descriptor *usb_get_epd(struct usb_interface *iface, int ep)
@@ -921,10 +919,9 @@ struct usb_endpoint_descriptor *usb_get_epd(struct usb_interface *iface, int ep)
        return &iface->cur_altsetting->endpoint[ep].desc;
 }
 
-extern int i2400m_op_rfkill_sw_toggle(struct wimax_dev *,
-                                     enum wimax_rf_state);
-extern void i2400m_report_tlv_rf_switches_status(
-       struct i2400m *, const struct i2400m_tlv_rf_switches_status *);
+int i2400m_op_rfkill_sw_toggle(struct wimax_dev *, enum wimax_rf_state);
+void i2400m_report_tlv_rf_switches_status(struct i2400m *,
+                                         const struct i2400m_tlv_rf_switches_status *);
 
 /*
  * Helpers for firmware backwards compatibility
@@ -968,8 +965,8 @@ void __i2400m_msleep(unsigned ms)
 
 
 /* module initialization helpers */
-extern int i2400m_barker_db_init(const char *);
-extern void i2400m_barker_db_exit(void);
+int i2400m_barker_db_init(const char *);
+void i2400m_barker_db_exit(void);
 
 
 
index 168140c54028eb16d7482ecbb7a6a835d4807736..bb00633203973a5ebc743f5438d6100d79aa2a82 100644 (file)
@@ -37,9 +37,9 @@ enum ath10k_debug_mask {
 
 extern unsigned int ath10k_debug_mask;
 
-extern __printf(1, 2) int ath10k_info(const char *fmt, ...);
-extern __printf(1, 2) int ath10k_err(const char *fmt, ...);
-extern __printf(1, 2) int ath10k_warn(const char *fmt, ...);
+__printf(1, 2) int ath10k_info(const char *fmt, ...);
+__printf(1, 2) int ath10k_err(const char *fmt, ...);
+__printf(1, 2) int ath10k_warn(const char *fmt, ...);
 
 #ifdef CONFIG_ATH10K_DEBUGFS
 int ath10k_debug_create(struct ath10k *ar);
@@ -68,7 +68,7 @@ static inline void ath10k_debug_read_target_stats(struct ath10k *ar,
 #endif /* CONFIG_ATH10K_DEBUGFS */
 
 #ifdef CONFIG_ATH10K_DEBUG
-extern __printf(2, 3) void ath10k_dbg(enum ath10k_debug_mask mask,
+__printf(2, 3) void ath10k_dbg(enum ath10k_debug_mask mask,
                                      const char *fmt, ...);
 void ath10k_dbg_dump(enum ath10k_debug_mask mask,
                     const char *msg, const char *prefix,
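
Dropping extern from these prototypes is cosmetic, but the __printf(m, n) markers they keep do real work: in the kernel's compiler headers the macro expands to __attribute__((format(printf, m, n))), so the compiler type-checks every format string passed to ath10k_dbg(), ath10k_warn() and friends. A small standalone illustration of the same attribute:

    #include <stdarg.h>
    #include <stdio.h>

    #define __printf(a, b) __attribute__((format(printf, a, b)))

    __printf(2, 3)
    static int log_dbg(int level, const char *fmt, ...)
    {
            va_list args;
            int ret;

            (void)level;            /* unused in this sketch */
            va_start(args, fmt);
            ret = vfprintf(stderr, fmt, args);
            va_end(args);
            return ret;
    }

    int main(void)
    {
            log_dbg(1, "value=%d\n", 42);
            /* log_dbg(1, "value=%d\n", "oops");  would trigger -Wformat */
            return 0;
    }
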
index 55f90c761868ddd9fa228510efb27465d360c00b..48d44e7f386cc336099e579bed0302b16c0eea19 100644 (file)
@@ -1758,7 +1758,7 @@ int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
        cmd = (struct wmi_vdev_up_cmd *)skb->data;
        cmd->vdev_id       = __cpu_to_le32(vdev_id);
        cmd->vdev_assoc_id = __cpu_to_le32(aid);
-       memcpy(&cmd->vdev_bssid.addr, bssid, 6);
+       memcpy(&cmd->vdev_bssid.addr, bssid, ETH_ALEN);
 
        ath10k_dbg(ATH10K_DBG_WMI,
                   "wmi mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
@@ -1918,7 +1918,7 @@ int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id,
        cmd->vdev_id     = __cpu_to_le32(vdev_id);
        cmd->param_id    = __cpu_to_le32(param_id);
        cmd->param_value = __cpu_to_le32(param_value);
-       memcpy(&cmd->peer_macaddr.addr, peer_addr, 6);
+       memcpy(&cmd->peer_macaddr.addr, peer_addr, ETH_ALEN);
 
        ath10k_dbg(ATH10K_DBG_WMI,
                   "wmi vdev %d peer 0x%pM set param %d value %d\n",
index 98a886154d9cc59775eaa0c17c74305452e52e8e..05debf700a846db00f55e0071df53e207d2e6c63 100644 (file)
@@ -22,8 +22,7 @@
 
 #define ATH6KL_MAX_IE                  256
 
-extern __printf(2, 3)
-int ath6kl_printk(const char *level, const char *fmt, ...);
+__printf(2, 3) int ath6kl_printk(const char *level, const char *fmt, ...);
 
 /*
  * Reflects the version of binary interface exposed by ATH6KL target
index 74369de00fb57a40dcebd72ced2002d0cb571b24..ca9ba005f2871f3e42bbc914bb5ca90f6e8b90e9 100644 (file)
@@ -50,11 +50,10 @@ enum ATH6K_DEBUG_MASK {
 };
 
 extern unsigned int debug_mask;
-extern __printf(2, 3)
-int ath6kl_printk(const char *level, const char *fmt, ...);
-extern __printf(1, 2) int ath6kl_info(const char *fmt, ...);
-extern __printf(1, 2) int ath6kl_err(const char *fmt, ...);
-extern __printf(1, 2) int ath6kl_warn(const char *fmt, ...);
+__printf(2, 3) int ath6kl_printk(const char *level, const char *fmt, ...);
+__printf(1, 2) int ath6kl_info(const char *fmt, ...);
+__printf(1, 2) int ath6kl_err(const char *fmt, ...);
+__printf(1, 2) int ath6kl_warn(const char *fmt, ...);
 
 enum ath6kl_war {
        ATH6KL_WAR_INVALID_RATE,
index 2ee35f677c0e8843cdc66534fd40cde15755275b..da24ba2a5b41cfb1193fbc393de3a93b102e41f4 100644 (file)
@@ -952,7 +952,7 @@ void ath9k_ps_restore(struct ath_softc *sc);
 u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate);
 
 void ath_start_rfkill_poll(struct ath_softc *sc);
-extern void ath9k_rfkill_poll_state(struct ieee80211_hw *hw);
+void ath9k_rfkill_poll_state(struct ieee80211_hw *hw);
 void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
                               struct ieee80211_vif *vif,
                               struct ath9k_vif_iter_data *iter_data);
index 4ee472a5a4e4ee6e81b1c0ffc820b6f685fb7ad1..ab9e3a8410bc2065fff9cb58996645fbf67be4fc 100644 (file)
@@ -1269,13 +1269,6 @@ static void ath9k_antenna_check(struct ath_softc *sc,
        if (!(ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB))
                return;
 
-       /*
-        * All MPDUs in an aggregate will use the same LNA
-        * as the first MPDU.
-        */
-       if (rs->rs_isaggr && !rs->rs_firstaggr)
-               return;
-
        /*
         * Change the default rx antenna if rx diversity
         * chooses the other antenna 3 times in a row.
index 35b515fe3ffa41e00dc614b6590cc2eabead9de4..5ac713d2ff5d22dc6d976291c6d97098bfbceafd 100644 (file)
@@ -399,6 +399,7 @@ static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
        tbf->bf_buf_addr = bf->bf_buf_addr;
        memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
        tbf->bf_state = bf->bf_state;
+       tbf->bf_state.stale = false;
 
        return tbf;
 }
@@ -1389,11 +1390,15 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
                      u16 tid, u16 *ssn)
 {
        struct ath_atx_tid *txtid;
+       struct ath_txq *txq;
        struct ath_node *an;
        u8 density;
 
        an = (struct ath_node *)sta->drv_priv;
        txtid = ATH_AN_2_TID(an, tid);
+       txq = txtid->ac->txq;
+
+       ath_txq_lock(sc, txq);
 
        /* update ampdu factor/density, they may have changed. This may happen
         * in HT IBSS when a beacon with HT-info is received after the station
@@ -1417,6 +1422,8 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
        memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
        txtid->baw_head = txtid->baw_tail = 0;
 
+       ath_txq_unlock_complete(sc, txq);
+
        return 0;
 }
 
@@ -1555,8 +1562,10 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
                        __skb_unlink(bf->bf_mpdu, tid_q);
                        list_add_tail(&bf->list, &bf_q);
                        ath_set_rates(tid->an->vif, tid->an->sta, bf);
-                       ath_tx_addto_baw(sc, tid, bf);
-                       bf->bf_state.bf_type &= ~BUF_AGGR;
+                       if (bf_isampdu(bf)) {
+                               ath_tx_addto_baw(sc, tid, bf);
+                               bf->bf_state.bf_type &= ~BUF_AGGR;
+                       }
                        if (bf_tail)
                                bf_tail->bf_next = bf;
 
@@ -1950,7 +1959,9 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
                        if (bf_is_ampdu_not_probing(bf))
                                txq->axq_ampdu_depth++;
 
-                       bf = bf->bf_lastbf->bf_next;
+                       bf_last = bf->bf_lastbf;
+                       bf = bf_last->bf_next;
+                       bf_last->bf_next = NULL;
                }
        }
 }
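
In the last ath9k hunk, the run of buffers just handed to the hardware queue is terminated by clearing bf_last->bf_next, so anything that later follows the bf_next chain of the queued frame stops at its last buffer rather than running on into the next, separately tracked frame. The same idea for a generic singly linked list (types and names are illustrative):

    #include <assert.h>
    #include <stddef.h>

    struct buf {
            struct buf *next;
            int id;
    };

    /* Detach the run ending at 'last' from a longer chain before queueing it,
     * returning whatever remains behind it.
     */
    static struct buf *detach_run(struct buf *last)
    {
            struct buf *rest = last->next;

            last->next = NULL;      /* terminate the run that was queued */
            return rest;
    }

    int main(void)
    {
            struct buf c = { .next = NULL, .id = 3 };
            struct buf b = { .next = &c,   .id = 2 };
            struct buf a = { .next = &b,   .id = 1 };

            struct buf *rest = detach_run(&b);   /* queue a..b, keep c */
            assert(rest == &c && b.next == NULL && a.next == &b);
            return 0;
    }
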
index 61c302a6bdeaa38ef1a1d20b3ac1e477db794745..5b340769d5bb2196bf3d0192e007ed13f23236fc 100644 (file)
@@ -316,8 +316,8 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
        }
        conn.channel = ch - 1;
 
-       memcpy(conn.bssid, bss->bssid, 6);
-       memcpy(conn.dst_mac, bss->bssid, 6);
+       memcpy(conn.bssid, bss->bssid, ETH_ALEN);
+       memcpy(conn.dst_mac, bss->bssid, ETH_ALEN);
        /*
         * FW don't support scan after connection attempt
         */
index b827d51c30a37b93f1747d7df9b2049aeb97e25f..a55ae6494c3b6400d28c029af5a0e7a4770c6680 100644 (file)
@@ -844,18 +844,18 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
        if (priv->wep_is_on)
                frame_ctl |= IEEE80211_FCTL_PROTECTED;
        if (priv->operating_mode == IW_MODE_ADHOC) {
-               skb_copy_from_linear_data(skb, &header.addr1, 6);
-               memcpy(&header.addr2, dev->dev_addr, 6);
-               memcpy(&header.addr3, priv->BSSID, 6);
+               skb_copy_from_linear_data(skb, &header.addr1, ETH_ALEN);
+               memcpy(&header.addr2, dev->dev_addr, ETH_ALEN);
+               memcpy(&header.addr3, priv->BSSID, ETH_ALEN);
        } else {
                frame_ctl |= IEEE80211_FCTL_TODS;
-               memcpy(&header.addr1, priv->CurrentBSSID, 6);
-               memcpy(&header.addr2, dev->dev_addr, 6);
-               skb_copy_from_linear_data(skb, &header.addr3, 6);
+               memcpy(&header.addr1, priv->CurrentBSSID, ETH_ALEN);
+               memcpy(&header.addr2, dev->dev_addr, ETH_ALEN);
+               skb_copy_from_linear_data(skb, &header.addr3, ETH_ALEN);
        }
 
        if (priv->use_wpa)
-               memcpy(&header.addr4, SNAP_RFC1024, 6);
+               memcpy(&header.addr4, SNAP_RFC1024, ETH_ALEN);
 
        header.frame_control = cpu_to_le16(frame_ctl);
        /* Copy the wireless header into the card */
@@ -929,11 +929,11 @@ static void fast_rx_path(struct atmel_private *priv,
                }
        }
 
-       memcpy(skbp, header->addr1, 6); /* destination address */
+       memcpy(skbp, header->addr1, ETH_ALEN); /* destination address */
        if (le16_to_cpu(header->frame_control) & IEEE80211_FCTL_FROMDS)
-               memcpy(&skbp[6], header->addr3, 6);
+               memcpy(&skbp[ETH_ALEN], header->addr3, ETH_ALEN);
        else
-               memcpy(&skbp[6], header->addr2, 6); /* source address */
+               memcpy(&skbp[ETH_ALEN], header->addr2, ETH_ALEN); /* source address */
 
        skb->protocol = eth_type_trans(skb, priv->dev);
        skb->ip_summed = CHECKSUM_NONE;
@@ -969,14 +969,14 @@ static void frag_rx_path(struct atmel_private *priv,
                         u16 msdu_size, u16 rx_packet_loc, u32 crc, u16 seq_no,
                         u8 frag_no, int more_frags)
 {
-       u8 mac4[6];
-       u8 source[6];
+       u8 mac4[ETH_ALEN];
+       u8 source[ETH_ALEN];
        struct sk_buff *skb;
 
        if (le16_to_cpu(header->frame_control) & IEEE80211_FCTL_FROMDS)
-               memcpy(source, header->addr3, 6);
+               memcpy(source, header->addr3, ETH_ALEN);
        else
-               memcpy(source, header->addr2, 6);
+               memcpy(source, header->addr2, ETH_ALEN);
 
        rx_packet_loc += 24; /* skip header */
 
@@ -984,9 +984,9 @@ static void frag_rx_path(struct atmel_private *priv,
                msdu_size -= 4;
 
        if (frag_no == 0) { /* first fragment */
-               atmel_copy_to_host(priv->dev, mac4, rx_packet_loc, 6);
-               msdu_size -= 6;
-               rx_packet_loc += 6;
+               atmel_copy_to_host(priv->dev, mac4, rx_packet_loc, ETH_ALEN);
+               msdu_size -= ETH_ALEN;
+               rx_packet_loc += ETH_ALEN;
 
                if (priv->do_rx_crc)
                        crc = crc32_le(crc, mac4, 6);
@@ -994,9 +994,9 @@ static void frag_rx_path(struct atmel_private *priv,
                priv->frag_seq = seq_no;
                priv->frag_no = 1;
                priv->frag_len = msdu_size;
-               memcpy(priv->frag_source, source, 6);
-               memcpy(&priv->rx_buf[6], source, 6);
-               memcpy(priv->rx_buf, header->addr1, 6);
+               memcpy(priv->frag_source, source, ETH_ALEN);
+               memcpy(&priv->rx_buf[ETH_ALEN], source, ETH_ALEN);
+               memcpy(priv->rx_buf, header->addr1, ETH_ALEN);
 
                atmel_copy_to_host(priv->dev, &priv->rx_buf[12], rx_packet_loc, msdu_size);
 
@@ -1006,13 +1006,13 @@ static void frag_rx_path(struct atmel_private *priv,
                        atmel_copy_to_host(priv->dev, (void *)&netcrc, rx_packet_loc + msdu_size, 4);
                        if ((crc ^ 0xffffffff) != netcrc) {
                                priv->dev->stats.rx_crc_errors++;
-                               memset(priv->frag_source, 0xff, 6);
+                               memset(priv->frag_source, 0xff, ETH_ALEN);
                        }
                }
 
        } else if (priv->frag_no == frag_no &&
                   priv->frag_seq == seq_no &&
-                  memcmp(priv->frag_source, source, 6) == 0) {
+                  memcmp(priv->frag_source, source, ETH_ALEN) == 0) {
 
                atmel_copy_to_host(priv->dev, &priv->rx_buf[12 + priv->frag_len],
                                   rx_packet_loc, msdu_size);
@@ -1024,7 +1024,7 @@ static void frag_rx_path(struct atmel_private *priv,
                        atmel_copy_to_host(priv->dev, (void *)&netcrc, rx_packet_loc + msdu_size, 4);
                        if ((crc ^ 0xffffffff) != netcrc) {
                                priv->dev->stats.rx_crc_errors++;
-                               memset(priv->frag_source, 0xff, 6);
+                               memset(priv->frag_source, 0xff, ETH_ALEN);
                                more_frags = 1; /* don't send broken assembly */
                        }
                }
@@ -1033,7 +1033,7 @@ static void frag_rx_path(struct atmel_private *priv,
                priv->frag_no++;
 
                if (!more_frags) { /* last one */
-                       memset(priv->frag_source, 0xff, 6);
+                       memset(priv->frag_source, 0xff, ETH_ALEN);
                        if (!(skb = dev_alloc_skb(priv->frag_len + 14))) {
                                priv->dev->stats.rx_dropped++;
                        } else {
@@ -1129,7 +1129,7 @@ static void rx_done_irq(struct atmel_private *priv)
                        atmel_copy_to_host(priv->dev, (unsigned char *)&priv->rx_buf, rx_packet_loc + 24, msdu_size);
 
                        /* we use the same buffer for frag reassembly and control packets */
-                       memset(priv->frag_source, 0xff, 6);
+                       memset(priv->frag_source, 0xff, ETH_ALEN);
 
                        if (priv->do_rx_crc) {
                                /* last 4 octets is crc */
@@ -1557,7 +1557,7 @@ struct net_device *init_atmel_card(unsigned short irq, unsigned long port,
        priv->last_qual = jiffies;
        priv->last_beacon_timestamp = 0;
        memset(priv->frag_source, 0xff, sizeof(priv->frag_source));
-       memset(priv->BSSID, 0, 6);
+       memset(priv->BSSID, 0, ETH_ALEN);
        priv->CurrentBSSID[0] = 0xFF; /* Initialize to something invalid.... */
        priv->station_was_associated = 0;
 
@@ -1718,7 +1718,7 @@ static int atmel_get_wap(struct net_device *dev,
                         char *extra)
 {
        struct atmel_private *priv = netdev_priv(dev);
-       memcpy(awrq->sa_data, priv->CurrentBSSID, 6);
+       memcpy(awrq->sa_data, priv->CurrentBSSID, ETH_ALEN);
        awrq->sa_family = ARPHRD_ETHER;
 
        return 0;
@@ -2356,7 +2356,7 @@ static int atmel_get_scan(struct net_device *dev,
        for (i = 0; i < priv->BSS_list_entries; i++) {
                iwe.cmd = SIOCGIWAP;
                iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
-               memcpy(iwe.u.ap_addr.sa_data, priv->BSSinfo[i].BSSID, 6);
+               memcpy(iwe.u.ap_addr.sa_data, priv->BSSinfo[i].BSSID, ETH_ALEN);
                current_ev = iwe_stream_add_event(info, current_ev,
                                                  extra + IW_SCAN_MAX_DATA,
                                                  &iwe, IW_EV_ADDR_LEN);
@@ -2760,7 +2760,7 @@ static void atmel_enter_state(struct atmel_private *priv, int new_state)
 static void atmel_scan(struct atmel_private *priv, int specific_ssid)
 {
        struct {
-               u8 BSSID[6];
+               u8 BSSID[ETH_ALEN];
                u8 SSID[MAX_SSID_LENGTH];
                u8 scan_type;
                u8 channel;
@@ -2771,7 +2771,7 @@ static void atmel_scan(struct atmel_private *priv, int specific_ssid)
                u8 SSID_size;
        } cmd;
 
-       memset(cmd.BSSID, 0xff, 6);
+       memset(cmd.BSSID, 0xff, ETH_ALEN);
 
        if (priv->fast_scan) {
                cmd.SSID_size = priv->SSID_size;
@@ -2816,7 +2816,7 @@ static void join(struct atmel_private *priv, int type)
 
        cmd.SSID_size = priv->SSID_size;
        memcpy(cmd.SSID, priv->SSID, priv->SSID_size);
-       memcpy(cmd.BSSID, priv->CurrentBSSID, 6);
+       memcpy(cmd.BSSID, priv->CurrentBSSID, ETH_ALEN);
        cmd.channel = (priv->channel & 0x7f);
        cmd.BSS_type = type;
        cmd.timeout = cpu_to_le16(2000);
@@ -2837,7 +2837,7 @@ static void start(struct atmel_private *priv, int type)
 
        cmd.SSID_size = priv->SSID_size;
        memcpy(cmd.SSID, priv->SSID, priv->SSID_size);
-       memcpy(cmd.BSSID, priv->BSSID, 6);
+       memcpy(cmd.BSSID, priv->BSSID, ETH_ALEN);
        cmd.BSS_type = type;
        cmd.channel = (priv->channel & 0x7f);
 
@@ -2883,9 +2883,9 @@ static void send_authentication_request(struct atmel_private *priv, u16 system,
        header.frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH);
        header.duration_id = cpu_to_le16(0x8000);
        header.seq_ctrl = 0;
-       memcpy(header.addr1, priv->CurrentBSSID, 6);
-       memcpy(header.addr2, priv->dev->dev_addr, 6);
-       memcpy(header.addr3, priv->CurrentBSSID, 6);
+       memcpy(header.addr1, priv->CurrentBSSID, ETH_ALEN);
+       memcpy(header.addr2, priv->dev->dev_addr, ETH_ALEN);
+       memcpy(header.addr3, priv->CurrentBSSID, ETH_ALEN);
 
        if (priv->wep_is_on && priv->CurrentAuthentTransactionSeqNum != 1)
                /* no WEP for authentication frames with TrSeqNo 1 */
@@ -2916,7 +2916,7 @@ static void send_association_request(struct atmel_private *priv, int is_reassoc)
        struct ass_req_format {
                __le16 capability;
                __le16 listen_interval;
-               u8 ap[6]; /* nothing after here directly accessible */
+               u8 ap[ETH_ALEN]; /* nothing after here directly accessible */
                u8 ssid_el_id;
                u8 ssid_len;
                u8 ssid[MAX_SSID_LENGTH];
@@ -2930,9 +2930,9 @@ static void send_association_request(struct atmel_private *priv, int is_reassoc)
        header.duration_id = cpu_to_le16(0x8000);
        header.seq_ctrl = 0;
 
-       memcpy(header.addr1, priv->CurrentBSSID, 6);
-       memcpy(header.addr2, priv->dev->dev_addr, 6);
-       memcpy(header.addr3, priv->CurrentBSSID, 6);
+       memcpy(header.addr1, priv->CurrentBSSID, ETH_ALEN);
+       memcpy(header.addr2, priv->dev->dev_addr, ETH_ALEN);
+       memcpy(header.addr3, priv->CurrentBSSID, ETH_ALEN);
 
        body.capability = cpu_to_le16(WLAN_CAPABILITY_ESS);
        if (priv->wep_is_on)
@@ -2944,7 +2944,7 @@ static void send_association_request(struct atmel_private *priv, int is_reassoc)
 
        /* current AP address - only in reassoc frame */
        if (is_reassoc) {
-               memcpy(body.ap, priv->CurrentBSSID, 6);
+               memcpy(body.ap, priv->CurrentBSSID, ETH_ALEN);
                ssid_el_p = &body.ssid_el_id;
                bodysize = 18 + priv->SSID_size;
        } else {
@@ -3021,7 +3021,7 @@ static void store_bss_info(struct atmel_private *priv,
        int i, index;
 
        for (index = -1, i = 0; i < priv->BSS_list_entries; i++)
-               if (memcmp(bss, priv->BSSinfo[i].BSSID, 6) == 0)
+               if (memcmp(bss, priv->BSSinfo[i].BSSID, ETH_ALEN) == 0)
                        index = i;
 
        /* If we process a probe and an entry from this BSS exists
@@ -3032,7 +3032,7 @@ static void store_bss_info(struct atmel_private *priv,
                if (priv->BSS_list_entries == MAX_BSS_ENTRIES)
                        return;
                index = priv->BSS_list_entries++;
-               memcpy(priv->BSSinfo[index].BSSID, bss, 6);
+               memcpy(priv->BSSinfo[index].BSSID, bss, ETH_ALEN);
                priv->BSSinfo[index].RSSI = rssi;
        } else {
                if (rssi > priv->BSSinfo[index].RSSI)
@@ -3235,7 +3235,7 @@ static void atmel_join_bss(struct atmel_private *priv, int bss_index)
 {
        struct bss_info *bss =  &priv->BSSinfo[bss_index];
 
-       memcpy(priv->CurrentBSSID, bss->BSSID, 6);
+       memcpy(priv->CurrentBSSID, bss->BSSID, ETH_ALEN);
        memcpy(priv->SSID, bss->SSID, priv->SSID_size = bss->SSIDsize);
 
        /* The WPA stuff cares about the current AP address */
@@ -3767,7 +3767,7 @@ static int probe_atmel_card(struct net_device *dev)
                                0x00, 0x04, 0x25, 0x00, 0x00, 0x00
                        };
                        printk(KERN_ALERT "%s: *** Invalid MAC address. UPGRADE Firmware ****\n", dev->name);
-                       memcpy(dev->dev_addr, default_mac, 6);
+                       memcpy(dev->dev_addr, default_mac, ETH_ALEN);
                }
        }
 
@@ -3819,7 +3819,7 @@ static void build_wpa_mib(struct atmel_private *priv)
 
        struct { /* NB this is matched to the hardware, don't change. */
                u8 cipher_default_key_value[MAX_ENCRYPTION_KEYS][MAX_ENCRYPTION_KEY_SIZE];
-               u8 receiver_address[6];
+               u8 receiver_address[ETH_ALEN];
                u8 wep_is_on;
                u8 default_key; /* 0..3 */
                u8 group_key;
@@ -3837,7 +3837,7 @@ static void build_wpa_mib(struct atmel_private *priv)
 
        mib.wep_is_on = priv->wep_is_on;
        mib.exclude_unencrypted = priv->exclude_unencrypted;
-       memcpy(mib.receiver_address, priv->CurrentBSSID, 6);
+       memcpy(mib.receiver_address, priv->CurrentBSSID, ETH_ALEN);
 
        /* zero all the keys before adding in valid ones. */
        memset(mib.cipher_default_key_value, 0, sizeof(mib.cipher_default_key_value));
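The atmel hunks above only swap the hard-coded MAC-address length 6 for ETH_ALEN, which <linux/if_ether.h> defines as 6, so the copy and compare sizes are unchanged. A minimal sketch of the same pattern, using a hypothetical helper rather than code from this patch:

    #include <linux/if_ether.h>     /* ETH_ALEN == 6 */
    #include <linux/string.h>
    #include <linux/types.h>

    /* Hypothetical helper; shows only the s/6/ETH_ALEN/ substitution. */
    static void copy_bssid(u8 *dst, const u8 *src)
    {
            memcpy(dst, src, ETH_ALEN);     /* previously: memcpy(dst, src, 6) */
    }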
index 8cb206a89083aaa314868ef2c07b906181fe6689..4ae63f4ddfb20394d1a38c4847b5c5732887f277 100644
@@ -278,7 +278,7 @@ int b43_generate_txhdr(struct b43_wldev *dev,
        else
                txhdr->phy_rate = b43_plcp_get_ratecode_cck(rate);
        txhdr->mac_frame_ctl = wlhdr->frame_control;
-       memcpy(txhdr->tx_receiver, wlhdr->addr1, 6);
+       memcpy(txhdr->tx_receiver, wlhdr->addr1, ETH_ALEN);
 
        /* Calculate duration for fallback rate */
        if ((rate_fb == rate) ||
index 849a28c803023e03570d69ca8d6de8f60462a6c6..86588c9ff0f2b6cddee0b3a7da5455fd05b29a9b 100644
@@ -215,7 +215,7 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
        rate_fb_ofdm = b43legacy_is_ofdm_rate(rate_fb->hw_value);
 
        txhdr->mac_frame_ctl = wlhdr->frame_control;
-       memcpy(txhdr->tx_receiver, wlhdr->addr1, 6);
+       memcpy(txhdr->tx_receiver, wlhdr->addr1, ETH_ALEN);
 
        /* Calculate duration for fallback rate */
        if ((rate_fb->hw_value == rate) ||
index 64f4a2bc8ddedf6c131d8b8d70408acb3ba17d9b..c3462b75bd080d4f7e9d72dd979f86bd5c894319 100644
@@ -464,8 +464,6 @@ static struct sdio_driver brcmf_sdmmc_driver = {
 
 static int brcmf_sdio_pd_probe(struct platform_device *pdev)
 {
-       int ret;
-
        brcmf_dbg(SDIO, "Enter\n");
 
        brcmfmac_sdio_pdata = pdev->dev.platform_data;
@@ -473,11 +471,7 @@ static int brcmf_sdio_pd_probe(struct platform_device *pdev)
        if (brcmfmac_sdio_pdata->power_on)
                brcmfmac_sdio_pdata->power_on();
 
-       ret = sdio_register_driver(&brcmf_sdmmc_driver);
-       if (ret)
-               brcmf_err("sdio_register_driver failed: %d\n", ret);
-
-       return ret;
+       return 0;
 }
 
 static int brcmf_sdio_pd_remove(struct platform_device *pdev)
@@ -500,6 +494,15 @@ static struct platform_driver brcmf_sdio_pd = {
        }
 };
 
+void brcmf_sdio_register(void)
+{
+       int ret;
+
+       ret = sdio_register_driver(&brcmf_sdmmc_driver);
+       if (ret)
+               brcmf_err("sdio_register_driver failed: %d\n", ret);
+}
+
 void brcmf_sdio_exit(void)
 {
        brcmf_dbg(SDIO, "Enter\n");
@@ -510,18 +513,13 @@ void brcmf_sdio_exit(void)
                sdio_unregister_driver(&brcmf_sdmmc_driver);
 }
 
-void brcmf_sdio_init(void)
+void __init brcmf_sdio_init(void)
 {
        int ret;
 
        brcmf_dbg(SDIO, "Enter\n");
 
        ret = platform_driver_probe(&brcmf_sdio_pd, brcmf_sdio_pd_probe);
-       if (ret == -ENODEV) {
-               brcmf_dbg(SDIO, "No platform data available, registering without.\n");
-               ret = sdio_register_driver(&brcmf_sdmmc_driver);
-       }
-
-       if (ret)
-               brcmf_err("driver registration failed: %d\n", ret);
+       if (ret == -ENODEV)
+               brcmf_dbg(SDIO, "No platform data available.\n");
 }
index 2eb9e642c9bf80d604bea8d201d86a0bb5c1a524..34af9d183107b6c0afa20c52c3aefc10d8aa42e1 100644
@@ -632,29 +632,29 @@ struct brcmf_skb_reorder_data {
        u8 *reorder;
 };
 
-extern int brcmf_netdev_wait_pend8021x(struct net_device *ndev);
+int brcmf_netdev_wait_pend8021x(struct net_device *ndev);
 
 /* Return pointer to interface name */
-extern char *brcmf_ifname(struct brcmf_pub *drvr, int idx);
+char *brcmf_ifname(struct brcmf_pub *drvr, int idx);
 
 /* Query dongle */
-extern int brcmf_proto_cdc_query_dcmd(struct brcmf_pub *drvr, int ifidx,
-                                      uint cmd, void *buf, uint len);
-extern int brcmf_proto_cdc_set_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
-                                   void *buf, uint len);
+int brcmf_proto_cdc_query_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
+                              void *buf, uint len);
+int brcmf_proto_cdc_set_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
+                            void *buf, uint len);
 
 /* Remove any protocol-specific data header. */
-extern int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws, u8 *ifidx,
-                              struct sk_buff *rxp);
+int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws, u8 *ifidx,
+                       struct sk_buff *rxp);
 
-extern int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked);
-extern struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx,
-                                    s32 ifidx, char *name, u8 *mac_addr);
-extern void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx);
+int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked);
+struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx,
+                             char *name, u8 *mac_addr);
+void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx);
 void brcmf_txflowblock_if(struct brcmf_if *ifp,
                          enum brcmf_netif_stop_reason reason, bool state);
-extern u32 brcmf_get_chip_info(struct brcmf_if *ifp);
-extern void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp,
-                            bool success);
+u32 brcmf_get_chip_info(struct brcmf_if *ifp);
+void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp,
+                     bool success);
 
 #endif                         /* _BRCMF_H_ */
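This header hunk, like the bus, protocol, SDIO and brcmsmac header hunks that follow, is purely cosmetic: extern is implicit on function declarations, so dropping it and re-wrapping the parameter lists changes neither linkage nor behaviour. A hypothetical illustration (the function name is made up, not taken from the patch); both declarations are equivalent:

    extern int brcmf_example_query(struct brcmf_pub *drvr, uint cmd);  /* old style */
    int brcmf_example_query(struct brcmf_pub *drvr, uint cmd);         /* new style */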
index f7c1985844e44c3ed0464a4e21588d538f693959..7f1340d03f18690fd08ab52c545cc8530a3c6de8 100644
@@ -132,34 +132,34 @@ struct pktq *brcmf_bus_gettxq(struct brcmf_bus *bus)
  * interface functions from common layer
  */
 
-extern bool brcmf_c_prec_enq(struct device *dev, struct pktq *q,
-                        struct sk_buff *pkt, int prec);
+bool brcmf_c_prec_enq(struct device *dev, struct pktq *q, struct sk_buff *pkt,
+                     int prec);
 
 /* Receive frame for delivery to OS.  Callee disposes of rxp. */
-extern void brcmf_rx_frames(struct device *dev, struct sk_buff_head *rxlist);
+void brcmf_rx_frames(struct device *dev, struct sk_buff_head *rxlist);
 
 /* Indication from bus module regarding presence/insertion of dongle. */
-extern int brcmf_attach(uint bus_hdrlen, struct device *dev);
+int brcmf_attach(uint bus_hdrlen, struct device *dev);
 /* Indication from bus module regarding removal/absence of dongle */
-extern void brcmf_detach(struct device *dev);
+void brcmf_detach(struct device *dev);
 /* Indication from bus module that dongle should be reset */
-extern void brcmf_dev_reset(struct device *dev);
+void brcmf_dev_reset(struct device *dev);
 /* Indication from bus module to change flow-control state */
-extern void brcmf_txflowblock(struct device *dev, bool state);
+void brcmf_txflowblock(struct device *dev, bool state);
 
 /* Notify the bus has transferred the tx packet to firmware */
-extern void brcmf_txcomplete(struct device *dev, struct sk_buff *txp,
-                            bool success);
+void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success);
 
-extern int brcmf_bus_start(struct device *dev);
+int brcmf_bus_start(struct device *dev);
 
 #ifdef CONFIG_BRCMFMAC_SDIO
-extern void brcmf_sdio_exit(void);
-extern void brcmf_sdio_init(void);
+void brcmf_sdio_exit(void);
+void brcmf_sdio_init(void);
+void brcmf_sdio_register(void);
 #endif
 #ifdef CONFIG_BRCMFMAC_USB
-extern void brcmf_usb_exit(void);
-extern void brcmf_usb_init(void);
+void brcmf_usb_exit(void);
+void brcmf_usb_register(void);
 #endif
 
 #endif                         /* _BRCMF_BUS_H_ */
index e067aec1fbf113220d1a1054ca3dfceb027448a2..40e7f854e10f9634b44e4475b3c78cab02ea4dc9 100644
@@ -1231,21 +1231,23 @@ u32 brcmf_get_chip_info(struct brcmf_if *ifp)
        return bus->chip << 4 | bus->chiprev;
 }
 
-static void brcmf_driver_init(struct work_struct *work)
+static void brcmf_driver_register(struct work_struct *work)
 {
-       brcmf_debugfs_init();
-
 #ifdef CONFIG_BRCMFMAC_SDIO
-       brcmf_sdio_init();
+       brcmf_sdio_register();
 #endif
 #ifdef CONFIG_BRCMFMAC_USB
-       brcmf_usb_init();
+       brcmf_usb_register();
 #endif
 }
-static DECLARE_WORK(brcmf_driver_work, brcmf_driver_init);
+static DECLARE_WORK(brcmf_driver_work, brcmf_driver_register);
 
 static int __init brcmfmac_module_init(void)
 {
+       brcmf_debugfs_init();
+#ifdef CONFIG_BRCMFMAC_SDIO
+       brcmf_sdio_init();
+#endif
        if (!schedule_work(&brcmf_driver_work))
                return -EBUSY;
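Taken together, the brcmfmac hunks above move the real bus-driver registration out of the platform probe path: brcmf_sdio_pd_probe() now only picks up optional platform data, the new brcmf_sdio_register() (and the renamed brcmf_usb_register()) perform the actual driver registration, and module init schedules them from a work item after running the __init brcmf_sdio_init(). A condensed, hypothetical sketch of the resulting flow, with names simplified and error handling trimmed; example_sdmmc_driver, example_pd and example_pd_probe stand in for the brcmfmac equivalents:

    #include <linux/module.h>
    #include <linux/workqueue.h>
    #include <linux/platform_device.h>
    #include <linux/mmc/sdio_func.h>

    static void example_driver_register(struct work_struct *work)
    {
            /* Runs from the workqueue after module init has returned. */
            if (sdio_register_driver(&example_sdmmc_driver))
                    pr_err("sdio_register_driver failed\n");
    }
    static DECLARE_WORK(example_work, example_driver_register);

    static int __init example_module_init(void)
    {
            /* Optional platform data; -ENODEV simply means there is none. */
            if (platform_driver_probe(&example_pd, example_pd_probe) == -ENODEV)
                    pr_debug("no platform data available\n");

            if (!schedule_work(&example_work))
                    return -EBUSY;
            return 0;
    }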
 
index ef9179883748f0aa9637900a6e7bab1b476cd937..53c6e710f2cb243c9997e9bb6f453a60b3f7634b 100644
  */
 
 /* Linkage, sets prot link and updates hdrlen in pub */
-extern int brcmf_proto_attach(struct brcmf_pub *drvr);
+int brcmf_proto_attach(struct brcmf_pub *drvr);
 
 /* Unlink, frees allocated protocol memory (including brcmf_proto) */
-extern void brcmf_proto_detach(struct brcmf_pub *drvr);
+void brcmf_proto_detach(struct brcmf_pub *drvr);
 
 /* Stop protocol: sync w/dongle state. */
-extern void brcmf_proto_stop(struct brcmf_pub *drvr);
+void brcmf_proto_stop(struct brcmf_pub *drvr);
 
 /* Add any protocol-specific data header.
  * Caller must reserve prot_hdrlen prepend space.
  */
-extern void brcmf_proto_hdrpush(struct brcmf_pub *, int ifidx, u8 offset,
-                               struct sk_buff *txp);
+void brcmf_proto_hdrpush(struct brcmf_pub *, int ifidx, u8 offset,
+                        struct sk_buff *txp);
 
 /* Sets dongle media info (drv_version, mac address). */
-extern int brcmf_c_preinit_dcmds(struct brcmf_if *ifp);
+int brcmf_c_preinit_dcmds(struct brcmf_if *ifp);
 
 #endif                         /* _BRCMF_PROTO_H_ */
index 83c041f1bf4ad154afb0822d734818a7561f9ad6..f0780ee056026f7de1214a507797ea1df256c764 100644
@@ -215,17 +215,16 @@ struct sdpcmd_regs {
        u16 PAD[0x80];
 };
 
-extern int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev,
-                                 struct chip_info **ci_ptr, u32 regs);
-extern void brcmf_sdio_chip_detach(struct chip_info **ci_ptr);
-extern void brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
-                                             struct chip_info *ci,
-                                             u32 drivestrength);
-extern u8 brcmf_sdio_chip_getinfidx(struct chip_info *ci, u16 coreid);
-extern void brcmf_sdio_chip_enter_download(struct brcmf_sdio_dev *sdiodev,
-                                          struct chip_info *ci);
-extern bool brcmf_sdio_chip_exit_download(struct brcmf_sdio_dev *sdiodev,
-                                         struct chip_info *ci, char *nvram_dat,
-                                         uint nvram_sz);
+int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev,
+                          struct chip_info **ci_ptr, u32 regs);
+void brcmf_sdio_chip_detach(struct chip_info **ci_ptr);
+void brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
+                                      struct chip_info *ci, u32 drivestrength);
+u8 brcmf_sdio_chip_getinfidx(struct chip_info *ci, u16 coreid);
+void brcmf_sdio_chip_enter_download(struct brcmf_sdio_dev *sdiodev,
+                                   struct chip_info *ci);
+bool brcmf_sdio_chip_exit_download(struct brcmf_sdio_dev *sdiodev,
+                                  struct chip_info *ci, char *nvram_dat,
+                                  uint nvram_sz);
 
 #endif         /* _BRCMFMAC_SDIO_CHIP_H_ */
index 2b5407f002e53bf90b8e320d0b54384b2a200f9b..c9b06b4e71f7d5be0f88400d9da26e15cb39e9f9 100644
@@ -181,18 +181,18 @@ struct brcmf_sdio_dev {
 };
 
 /* Register/deregister interrupt handler. */
-extern int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev);
-extern int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev *sdiodev);
+int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev);
+int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev *sdiodev);
 
 /* sdio device register access interface */
-extern u8 brcmf_sdio_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
-extern u32 brcmf_sdio_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
-extern void brcmf_sdio_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr,
-                            u8 data, int *ret);
-extern void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
-                            u32 data, int *ret);
-extern int brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
-                                  void *data, bool write);
+u8 brcmf_sdio_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
+u32 brcmf_sdio_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
+void brcmf_sdio_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr, u8 data,
+                     int *ret);
+void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr, u32 data,
+                     int *ret);
+int brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
+                           void *data, bool write);
 
 /* Buffer transfer to/from device (client) core via cmd53.
  *   fn:       function number
@@ -206,22 +206,17 @@ extern int brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
  * Returns 0 or error code.
  * NOTE: Async operation is not currently supported.
  */
-extern int
-brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
-                     uint flags, struct sk_buff_head *pktq);
-extern int
-brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
-                     uint flags, u8 *buf, uint nbytes);
-
-extern int
-brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
-                     uint flags, struct sk_buff *pkt);
-extern int
-brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
-                     uint flags, u8 *buf, uint nbytes);
-extern int
-brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
-                       uint flags, struct sk_buff_head *pktq);
+int brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+                         uint flags, struct sk_buff_head *pktq);
+int brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+                         uint flags, u8 *buf, uint nbytes);
+
+int brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+                         uint flags, struct sk_buff *pkt);
+int brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+                         uint flags, u8 *buf, uint nbytes);
+int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+                           uint flags, struct sk_buff_head *pktq);
 
 /* Flags bits */
 
@@ -237,46 +232,43 @@ brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
  *   nbytes:   number of bytes to transfer to/from buf
  * Returns 0 or error code.
  */
-extern int brcmf_sdcard_rwdata(struct brcmf_sdio_dev *sdiodev, uint rw,
-                              u32 addr, u8 *buf, uint nbytes);
-extern int brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write,
-                           u32 address, u8 *data, uint size);
+int brcmf_sdcard_rwdata(struct brcmf_sdio_dev *sdiodev, uint rw, u32 addr,
+                       u8 *buf, uint nbytes);
+int brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
+                    u8 *data, uint size);
 
 /* Issue an abort to the specified function */
-extern int brcmf_sdcard_abort(struct brcmf_sdio_dev *sdiodev, uint fn);
+int brcmf_sdcard_abort(struct brcmf_sdio_dev *sdiodev, uint fn);
 
 /* platform specific/high level functions */
-extern int brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev);
-extern int brcmf_sdio_remove(struct brcmf_sdio_dev *sdiodev);
+int brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev);
+int brcmf_sdio_remove(struct brcmf_sdio_dev *sdiodev);
 
 /* attach, return handler on success, NULL if failed.
  *  The handler shall be provided by all subsequent calls. No local cache
  *  cfghdl points to the starting address of pci device mapped memory
  */
-extern int brcmf_sdioh_attach(struct brcmf_sdio_dev *sdiodev);
-extern void brcmf_sdioh_detach(struct brcmf_sdio_dev *sdiodev);
+int brcmf_sdioh_attach(struct brcmf_sdio_dev *sdiodev);
+void brcmf_sdioh_detach(struct brcmf_sdio_dev *sdiodev);
 
 /* read or write one byte using cmd52 */
-extern int brcmf_sdioh_request_byte(struct brcmf_sdio_dev *sdiodev, uint rw,
-                                   uint fnc, uint addr, u8 *byte);
+int brcmf_sdioh_request_byte(struct brcmf_sdio_dev *sdiodev, uint rw, uint fnc,
+                            uint addr, u8 *byte);
 
 /* read or write 2/4 bytes using cmd53 */
-extern int
-brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev,
-                        uint rw, uint fnc, uint addr,
-                        u32 *word, uint nbyte);
+int brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev, uint rw, uint fnc,
+                            uint addr, u32 *word, uint nbyte);
 
 /* Watchdog timer interface for pm ops */
-extern void brcmf_sdio_wdtmr_enable(struct brcmf_sdio_dev *sdiodev,
-                                   bool enable);
+void brcmf_sdio_wdtmr_enable(struct brcmf_sdio_dev *sdiodev, bool enable);
 
-extern void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev);
-extern void brcmf_sdbrcm_disconnect(void *ptr);
-extern void brcmf_sdbrcm_isr(void *arg);
+void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev);
+void brcmf_sdbrcm_disconnect(void *ptr);
+void brcmf_sdbrcm_isr(void *arg);
 
-extern void brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick);
+void brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick);
 
-extern void brcmf_pm_resume_wait(struct brcmf_sdio_dev *sdiodev,
-                                wait_queue_head_t *wq);
-extern bool brcmf_pm_resume_error(struct brcmf_sdio_dev *sdiodev);
+void brcmf_pm_resume_wait(struct brcmf_sdio_dev *sdiodev,
+                         wait_queue_head_t *wq);
+bool brcmf_pm_resume_error(struct brcmf_sdio_dev *sdiodev);
 #endif                         /* _BRCM_SDH_H_ */
index 39e01a7c8556f2ce022bd5b8c545c2e2a615af5d..f4aea47e0730996ec059f02dbc1f2edaf98c04d1 100644
@@ -1539,7 +1539,7 @@ void brcmf_usb_exit(void)
        brcmf_release_fw(&fw_image_list);
 }
 
-void brcmf_usb_init(void)
+void brcmf_usb_register(void)
 {
        brcmf_dbg(USB, "Enter\n");
        INIT_LIST_HEAD(&fw_image_list);
index a8a267b5b87aebfd6bd40006c628c7964afc9cca..2d08c155c23bcd93afba34a0f28d17e2e68e46c5 100644
@@ -172,19 +172,19 @@ struct si_info {
 
 
 /* AMBA Interconnect exported externs */
-extern u32 ai_core_cflags(struct bcma_device *core, u32 mask, u32 val);
+u32 ai_core_cflags(struct bcma_device *core, u32 mask, u32 val);
 
 /* === exported functions === */
-extern struct si_pub *ai_attach(struct bcma_bus *pbus);
-extern void ai_detach(struct si_pub *sih);
-extern uint ai_cc_reg(struct si_pub *sih, uint regoff, u32 mask, u32 val);
-extern void ai_clkctl_init(struct si_pub *sih);
-extern u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih);
-extern bool ai_clkctl_cc(struct si_pub *sih, enum bcma_clkmode mode);
-extern bool ai_deviceremoved(struct si_pub *sih);
+struct si_pub *ai_attach(struct bcma_bus *pbus);
+void ai_detach(struct si_pub *sih);
+uint ai_cc_reg(struct si_pub *sih, uint regoff, u32 mask, u32 val);
+void ai_clkctl_init(struct si_pub *sih);
+u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih);
+bool ai_clkctl_cc(struct si_pub *sih, enum bcma_clkmode mode);
+bool ai_deviceremoved(struct si_pub *sih);
 
 /* Enable Ex-PA for 4313 */
-extern void ai_epa_4313war(struct si_pub *sih);
+void ai_epa_4313war(struct si_pub *sih);
 
 static inline u32 ai_get_cccaps(struct si_pub *sih)
 {
index 73d01e5861090d99d875c886ae8783c9dff5561c..03bdcf29bd50ee228d269b4ae4359159ffa1df9d 100644
@@ -37,17 +37,17 @@ struct brcms_ampdu_session {
        u16 dma_len;
 };
 
-extern void brcms_c_ampdu_reset_session(struct brcms_ampdu_session *session,
-                                       struct brcms_c_info *wlc);
-extern int brcms_c_ampdu_add_frame(struct brcms_ampdu_session *session,
-                                  struct sk_buff *p);
-extern void brcms_c_ampdu_finalize(struct brcms_ampdu_session *session);
+void brcms_c_ampdu_reset_session(struct brcms_ampdu_session *session,
+                                struct brcms_c_info *wlc);
+int brcms_c_ampdu_add_frame(struct brcms_ampdu_session *session,
+                           struct sk_buff *p);
+void brcms_c_ampdu_finalize(struct brcms_ampdu_session *session);
 
-extern struct ampdu_info *brcms_c_ampdu_attach(struct brcms_c_info *wlc);
-extern void brcms_c_ampdu_detach(struct ampdu_info *ampdu);
-extern void brcms_c_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb,
-                                struct sk_buff *p, struct tx_status *txs);
-extern void brcms_c_ampdu_macaddr_upd(struct brcms_c_info *wlc);
-extern void brcms_c_ampdu_shm_upd(struct ampdu_info *ampdu);
+struct ampdu_info *brcms_c_ampdu_attach(struct brcms_c_info *wlc);
+void brcms_c_ampdu_detach(struct ampdu_info *ampdu);
+void brcms_c_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb,
+                             struct sk_buff *p, struct tx_status *txs);
+void brcms_c_ampdu_macaddr_upd(struct brcms_c_info *wlc);
+void brcms_c_ampdu_shm_upd(struct ampdu_info *ampdu);
 
 #endif                         /* _BRCM_AMPDU_H_ */
index 97ea3881a8ec7c969ee40839b65878650def99e6..a3d487ab19646bcc2dc2be25accc559052bc7afc 100644
 #ifndef _BRCM_ANTSEL_H_
 #define _BRCM_ANTSEL_H_
 
-extern struct antsel_info *brcms_c_antsel_attach(struct brcms_c_info *wlc);
-extern void brcms_c_antsel_detach(struct antsel_info *asi);
-extern void brcms_c_antsel_init(struct antsel_info *asi);
-extern void brcms_c_antsel_antcfg_get(struct antsel_info *asi, bool usedef,
-                                 bool sel,
-                                 u8 id, u8 fbid, u8 *antcfg,
-                                 u8 *fbantcfg);
-extern u8 brcms_c_antsel_antsel2id(struct antsel_info *asi, u16 antsel);
+struct antsel_info *brcms_c_antsel_attach(struct brcms_c_info *wlc);
+void brcms_c_antsel_detach(struct antsel_info *asi);
+void brcms_c_antsel_init(struct antsel_info *asi);
+void brcms_c_antsel_antcfg_get(struct antsel_info *asi, bool usedef, bool sel,
+                              u8 id, u8 fbid, u8 *antcfg, u8 *fbantcfg);
+u8 brcms_c_antsel_antsel2id(struct antsel_info *asi, u16 antsel);
 
 #endif /* _BRCM_ANTSEL_H_ */
index 006483a0abe6452d022600666b5ddf9ad5d28248..39dd3a5b2979a572613c5335473cd121295b4649 100644
 
 #define BRCMS_DFS_EU (BRCMS_DFS_TPC | BRCMS_RADAR_TYPE_EU) /* Flag for DFS EU */
 
-extern struct brcms_cm_info *
-brcms_c_channel_mgr_attach(struct brcms_c_info *wlc);
+struct brcms_cm_info *brcms_c_channel_mgr_attach(struct brcms_c_info *wlc);
 
-extern void brcms_c_channel_mgr_detach(struct brcms_cm_info *wlc_cm);
+void brcms_c_channel_mgr_detach(struct brcms_cm_info *wlc_cm);
 
-extern bool brcms_c_valid_chanspec_db(struct brcms_cm_info *wlc_cm,
-                                     u16 chspec);
+bool brcms_c_valid_chanspec_db(struct brcms_cm_info *wlc_cm, u16 chspec);
 
-extern void brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm,
-                                  u16 chanspec,
-                                  struct txpwr_limits *txpwr);
-extern void brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm,
-                                    u16 chanspec,
-                                    u8 local_constraint_qdbm);
-extern void brcms_c_regd_init(struct brcms_c_info *wlc);
+void brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm, u16 chanspec,
+                               struct txpwr_limits *txpwr);
+void brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, u16 chanspec,
+                                 u8 local_constraint_qdbm);
+void brcms_c_regd_init(struct brcms_c_info *wlc);
 
 #endif                         /* _WLC_CHANNEL_H */
index 3a6544710c8ab222cc126ef53d5f19143498cc9e..edc5d105ff980e40e1ad2221a4c7686f87b0cea1 100644
@@ -457,6 +457,8 @@ static int brcms_ops_start(struct ieee80211_hw *hw)
        if (err != 0)
                brcms_err(wl->wlc->hw->d11core, "%s: brcms_up() returned %d\n",
                          __func__, err);
+
+       bcma_core_pci_power_save(wl->wlc->hw->d11core->bus, true);
        return err;
 }
 
@@ -479,6 +481,8 @@ static void brcms_ops_stop(struct ieee80211_hw *hw)
                return;
        }
 
+       bcma_core_pci_power_save(wl->wlc->hw->d11core->bus, false);
+
        /* put driver in down state */
        spin_lock_bh(&wl->lock);
        brcms_down(wl);
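The two hunks above tie PCI-core power management to the mac80211 start/stop callbacks: power save is enabled right after brcms_up() in brcms_ops_start() and disabled before brcms_down() in brcms_ops_stop(), via bcma_core_pci_power_save(bus, up). A stripped-down, hypothetical sketch of that pairing (the locking and wl->wlc checks of the real callbacks are omitted):

    static int example_ops_start(struct brcms_info *wl)
    {
            int err = brcms_up(wl);

            /* let the PCI core enter power save once the interface is up */
            bcma_core_pci_power_save(wl->wlc->hw->d11core->bus, true);
            return err;
    }

    static void example_ops_stop(struct brcms_info *wl)
    {
            /* leave power save before taking the interface down */
            bcma_core_pci_power_save(wl->wlc->hw->d11core->bus, false);
            brcms_down(wl);
    }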
index 4090032e81a29f5d4e9aaf0b9258ae8d8c80bfcc..198053dfc3102ccb17a12b051372d84ff720a32e 100644
@@ -88,26 +88,26 @@ struct brcms_info {
 };
 
 /* misc callbacks */
-extern void brcms_init(struct brcms_info *wl);
-extern uint brcms_reset(struct brcms_info *wl);
-extern void brcms_intrson(struct brcms_info *wl);
-extern u32 brcms_intrsoff(struct brcms_info *wl);
-extern void brcms_intrsrestore(struct brcms_info *wl, u32 macintmask);
-extern int brcms_up(struct brcms_info *wl);
-extern void brcms_down(struct brcms_info *wl);
-extern void brcms_txflowcontrol(struct brcms_info *wl, struct brcms_if *wlif,
-                               bool state, int prio);
-extern bool brcms_rfkill_set_hw_state(struct brcms_info *wl);
+void brcms_init(struct brcms_info *wl);
+uint brcms_reset(struct brcms_info *wl);
+void brcms_intrson(struct brcms_info *wl);
+u32 brcms_intrsoff(struct brcms_info *wl);
+void brcms_intrsrestore(struct brcms_info *wl, u32 macintmask);
+int brcms_up(struct brcms_info *wl);
+void brcms_down(struct brcms_info *wl);
+void brcms_txflowcontrol(struct brcms_info *wl, struct brcms_if *wlif,
+                        bool state, int prio);
+bool brcms_rfkill_set_hw_state(struct brcms_info *wl);
 
 /* timer functions */
-extern struct brcms_timer *brcms_init_timer(struct brcms_info *wl,
-                                     void (*fn) (void *arg), void *arg,
-                                     const char *name);
-extern void brcms_free_timer(struct brcms_timer *timer);
-extern void brcms_add_timer(struct brcms_timer *timer, uint ms, int periodic);
-extern bool brcms_del_timer(struct brcms_timer *timer);
-extern void brcms_dpc(unsigned long data);
-extern void brcms_timer(struct brcms_timer *t);
-extern void brcms_fatal_error(struct brcms_info *wl);
+struct brcms_timer *brcms_init_timer(struct brcms_info *wl,
+                                    void (*fn) (void *arg), void *arg,
+                                    const char *name);
+void brcms_free_timer(struct brcms_timer *timer);
+void brcms_add_timer(struct brcms_timer *timer, uint ms, int periodic);
+bool brcms_del_timer(struct brcms_timer *timer);
+void brcms_dpc(unsigned long data);
+void brcms_timer(struct brcms_timer *t);
+void brcms_fatal_error(struct brcms_info *wl);
 
 #endif                         /* _BRCM_MAC80211_IF_H_ */
index 4608e0eb14939d12a73a4af5d3c2408e27bc877c..69b14dc4dd55d0205e97ddc9c70d3f1425a88d23 100644
@@ -1906,14 +1906,14 @@ static void brcms_c_get_macaddr(struct brcms_hardware *wlc_hw, u8 etheraddr[ETH_
 
        /* If macaddr exists, use it (Sromrev4, CIS, ...). */
        if (!is_zero_ether_addr(sprom->il0mac)) {
-               memcpy(etheraddr, sprom->il0mac, 6);
+               memcpy(etheraddr, sprom->il0mac, ETH_ALEN);
                return;
        }
 
        if (wlc_hw->_nbands > 1)
-               memcpy(etheraddr, sprom->et1mac, 6);
+               memcpy(etheraddr, sprom->et1mac, ETH_ALEN);
        else
-               memcpy(etheraddr, sprom->il0mac, 6);
+               memcpy(etheraddr, sprom->il0mac, ETH_ALEN);
 }
 
 /* power both the pll and external oscillator on/off */
index b5d7a38b53fe3d8baf6f8cbc28ba8cef0aaa2b8a..c4d135cff04ad2f7883c783fb96244bbbabfa370 100644
@@ -616,66 +616,54 @@ struct brcms_bss_cfg {
        struct brcms_bss_info *current_bss;
 };
 
-extern int brcms_c_txfifo(struct brcms_c_info *wlc, uint fifo,
-                          struct sk_buff *p);
-extern int brcms_b_xmtfifo_sz_get(struct brcms_hardware *wlc_hw, uint fifo,
-                  uint *blocks);
-
-extern int brcms_c_set_gmode(struct brcms_c_info *wlc, u8 gmode, bool config);
-extern void brcms_c_mac_promisc(struct brcms_c_info *wlc, uint filter_flags);
-extern u16 brcms_c_calc_lsig_len(struct brcms_c_info *wlc, u32 ratespec,
-                               uint mac_len);
-extern u32 brcms_c_rspec_to_rts_rspec(struct brcms_c_info *wlc,
-                                            u32 rspec,
-                                            bool use_rspec, u16 mimo_ctlchbw);
-extern u16 brcms_c_compute_rtscts_dur(struct brcms_c_info *wlc, bool cts_only,
-                                     u32 rts_rate,
-                                     u32 frame_rate,
-                                     u8 rts_preamble_type,
-                                     u8 frame_preamble_type, uint frame_len,
-                                     bool ba);
-extern void brcms_c_inval_dma_pkts(struct brcms_hardware *hw,
-                              struct ieee80211_sta *sta,
-                              void (*dma_callback_fn));
-extern void brcms_c_update_probe_resp(struct brcms_c_info *wlc, bool suspend);
-extern int brcms_c_set_nmode(struct brcms_c_info *wlc);
-extern void brcms_c_beacon_phytxctl_txant_upd(struct brcms_c_info *wlc,
-                                         u32 bcn_rate);
-extern void brcms_b_antsel_type_set(struct brcms_hardware *wlc_hw,
-                                    u8 antsel_type);
-extern void brcms_b_set_chanspec(struct brcms_hardware *wlc_hw,
-                                 u16 chanspec,
-                                 bool mute, struct txpwr_limits *txpwr);
-extern void brcms_b_write_shm(struct brcms_hardware *wlc_hw, uint offset,
-                             u16 v);
-extern u16 brcms_b_read_shm(struct brcms_hardware *wlc_hw, uint offset);
-extern void brcms_b_mhf(struct brcms_hardware *wlc_hw, u8 idx, u16 mask,
-                       u16 val, int bands);
-extern void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags);
-extern void brcms_b_mctrl(struct brcms_hardware *wlc_hw, u32 mask, u32 val);
-extern void brcms_b_phy_reset(struct brcms_hardware *wlc_hw);
-extern void brcms_b_bw_set(struct brcms_hardware *wlc_hw, u16 bw);
-extern void brcms_b_core_phypll_reset(struct brcms_hardware *wlc_hw);
-extern void brcms_c_ucode_wake_override_set(struct brcms_hardware *wlc_hw,
-                                       u32 override_bit);
-extern void brcms_c_ucode_wake_override_clear(struct brcms_hardware *wlc_hw,
-                                         u32 override_bit);
-extern void brcms_b_write_template_ram(struct brcms_hardware *wlc_hw,
-                                      int offset, int len, void *buf);
-extern u16 brcms_b_rate_shm_offset(struct brcms_hardware *wlc_hw, u8 rate);
-extern void brcms_b_copyto_objmem(struct brcms_hardware *wlc_hw,
-                                  uint offset, const void *buf, int len,
-                                  u32 sel);
-extern void brcms_b_copyfrom_objmem(struct brcms_hardware *wlc_hw, uint offset,
-                                    void *buf, int len, u32 sel);
-extern void brcms_b_switch_macfreq(struct brcms_hardware *wlc_hw, u8 spurmode);
-extern u16 brcms_b_get_txant(struct brcms_hardware *wlc_hw);
-extern void brcms_b_phyclk_fgc(struct brcms_hardware *wlc_hw, bool clk);
-extern void brcms_b_macphyclk_set(struct brcms_hardware *wlc_hw, bool clk);
-extern void brcms_b_core_phypll_ctl(struct brcms_hardware *wlc_hw, bool on);
-extern void brcms_b_txant_set(struct brcms_hardware *wlc_hw, u16 phytxant);
-extern void brcms_b_band_stf_ss_set(struct brcms_hardware *wlc_hw,
-                                   u8 stf_mode);
-extern void brcms_c_init_scb(struct scb *scb);
+int brcms_c_txfifo(struct brcms_c_info *wlc, uint fifo, struct sk_buff *p);
+int brcms_b_xmtfifo_sz_get(struct brcms_hardware *wlc_hw, uint fifo,
+                          uint *blocks);
+
+int brcms_c_set_gmode(struct brcms_c_info *wlc, u8 gmode, bool config);
+void brcms_c_mac_promisc(struct brcms_c_info *wlc, uint filter_flags);
+u16 brcms_c_calc_lsig_len(struct brcms_c_info *wlc, u32 ratespec, uint mac_len);
+u32 brcms_c_rspec_to_rts_rspec(struct brcms_c_info *wlc, u32 rspec,
+                              bool use_rspec, u16 mimo_ctlchbw);
+u16 brcms_c_compute_rtscts_dur(struct brcms_c_info *wlc, bool cts_only,
+                              u32 rts_rate, u32 frame_rate,
+                              u8 rts_preamble_type, u8 frame_preamble_type,
+                              uint frame_len, bool ba);
+void brcms_c_inval_dma_pkts(struct brcms_hardware *hw,
+                           struct ieee80211_sta *sta, void (*dma_callback_fn));
+void brcms_c_update_probe_resp(struct brcms_c_info *wlc, bool suspend);
+int brcms_c_set_nmode(struct brcms_c_info *wlc);
+void brcms_c_beacon_phytxctl_txant_upd(struct brcms_c_info *wlc, u32 bcn_rate);
+void brcms_b_antsel_type_set(struct brcms_hardware *wlc_hw, u8 antsel_type);
+void brcms_b_set_chanspec(struct brcms_hardware *wlc_hw, u16 chanspec,
+                         bool mute, struct txpwr_limits *txpwr);
+void brcms_b_write_shm(struct brcms_hardware *wlc_hw, uint offset, u16 v);
+u16 brcms_b_read_shm(struct brcms_hardware *wlc_hw, uint offset);
+void brcms_b_mhf(struct brcms_hardware *wlc_hw, u8 idx, u16 mask, u16 val,
+                int bands);
+void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags);
+void brcms_b_mctrl(struct brcms_hardware *wlc_hw, u32 mask, u32 val);
+void brcms_b_phy_reset(struct brcms_hardware *wlc_hw);
+void brcms_b_bw_set(struct brcms_hardware *wlc_hw, u16 bw);
+void brcms_b_core_phypll_reset(struct brcms_hardware *wlc_hw);
+void brcms_c_ucode_wake_override_set(struct brcms_hardware *wlc_hw,
+                                    u32 override_bit);
+void brcms_c_ucode_wake_override_clear(struct brcms_hardware *wlc_hw,
+                                      u32 override_bit);
+void brcms_b_write_template_ram(struct brcms_hardware *wlc_hw, int offset,
+                               int len, void *buf);
+u16 brcms_b_rate_shm_offset(struct brcms_hardware *wlc_hw, u8 rate);
+void brcms_b_copyto_objmem(struct brcms_hardware *wlc_hw, uint offset,
+                          const void *buf, int len, u32 sel);
+void brcms_b_copyfrom_objmem(struct brcms_hardware *wlc_hw, uint offset,
+                            void *buf, int len, u32 sel);
+void brcms_b_switch_macfreq(struct brcms_hardware *wlc_hw, u8 spurmode);
+u16 brcms_b_get_txant(struct brcms_hardware *wlc_hw);
+void brcms_b_phyclk_fgc(struct brcms_hardware *wlc_hw, bool clk);
+void brcms_b_macphyclk_set(struct brcms_hardware *wlc_hw, bool clk);
+void brcms_b_core_phypll_ctl(struct brcms_hardware *wlc_hw, bool on);
+void brcms_b_txant_set(struct brcms_hardware *wlc_hw, u16 phytxant);
+void brcms_b_band_stf_ss_set(struct brcms_hardware *wlc_hw, u8 stf_mode);
+void brcms_c_init_scb(struct scb *scb);
 
 #endif                         /* _BRCM_MAIN_H_ */
index e34a71e7d24204a0cc6dd0a3cb12fd043ec2636a..4d3734f48d9c7a444ef7aab2383655b471a0a5f2 100644
@@ -179,121 +179,106 @@ struct shared_phy_params {
 };
 
 
-extern struct shared_phy *wlc_phy_shared_attach(struct shared_phy_params *shp);
-extern struct brcms_phy_pub *wlc_phy_attach(struct shared_phy *sh,
-                                           struct bcma_device *d11core,
-                                           int bandtype, struct wiphy *wiphy);
-extern void wlc_phy_detach(struct brcms_phy_pub *ppi);
-
-extern bool wlc_phy_get_phyversion(struct brcms_phy_pub *pih, u16 *phytype,
-                                  u16 *phyrev, u16 *radioid,
-                                  u16 *radiover);
-extern bool wlc_phy_get_encore(struct brcms_phy_pub *pih);
-extern u32 wlc_phy_get_coreflags(struct brcms_phy_pub *pih);
-
-extern void wlc_phy_hw_clk_state_upd(struct brcms_phy_pub *ppi, bool newstate);
-extern void wlc_phy_hw_state_upd(struct brcms_phy_pub *ppi, bool newstate);
-extern void wlc_phy_init(struct brcms_phy_pub *ppi, u16 chanspec);
-extern void wlc_phy_watchdog(struct brcms_phy_pub *ppi);
-extern int wlc_phy_down(struct brcms_phy_pub *ppi);
-extern u32 wlc_phy_clk_bwbits(struct brcms_phy_pub *pih);
-extern void wlc_phy_cal_init(struct brcms_phy_pub *ppi);
-extern void wlc_phy_antsel_init(struct brcms_phy_pub *ppi, bool lut_init);
-
-extern void wlc_phy_chanspec_set(struct brcms_phy_pub *ppi,
-                                u16 chanspec);
-extern u16 wlc_phy_chanspec_get(struct brcms_phy_pub *ppi);
-extern void wlc_phy_chanspec_radio_set(struct brcms_phy_pub *ppi,
-                                      u16 newch);
-extern u16 wlc_phy_bw_state_get(struct brcms_phy_pub *ppi);
-extern void wlc_phy_bw_state_set(struct brcms_phy_pub *ppi, u16 bw);
-
-extern int wlc_phy_rssi_compute(struct brcms_phy_pub *pih,
-                               struct d11rxhdr *rxh);
-extern void wlc_phy_por_inform(struct brcms_phy_pub *ppi);
-extern void wlc_phy_noise_sample_intr(struct brcms_phy_pub *ppi);
-extern bool wlc_phy_bist_check_phy(struct brcms_phy_pub *ppi);
-
-extern void wlc_phy_set_deaf(struct brcms_phy_pub *ppi, bool user_flag);
-
-extern void wlc_phy_switch_radio(struct brcms_phy_pub *ppi, bool on);
-extern void wlc_phy_anacore(struct brcms_phy_pub *ppi, bool on);
-
-
-extern void wlc_phy_BSSinit(struct brcms_phy_pub *ppi, bool bonlyap, int rssi);
-
-extern void wlc_phy_chanspec_ch14_widefilter_set(struct brcms_phy_pub *ppi,
-                                                bool wide_filter);
-extern void wlc_phy_chanspec_band_validch(struct brcms_phy_pub *ppi, uint band,
-                                         struct brcms_chanvec *channels);
-extern u16 wlc_phy_chanspec_band_firstch(struct brcms_phy_pub *ppi,
-                                        uint band);
-
-extern void wlc_phy_txpower_sromlimit(struct brcms_phy_pub *ppi, uint chan,
-                                     u8 *_min_, u8 *_max_, int rate);
-extern void wlc_phy_txpower_sromlimit_max_get(struct brcms_phy_pub *ppi,
-                                             uint chan, u8 *_max_, u8 *_min_);
-extern void wlc_phy_txpower_boardlimit_band(struct brcms_phy_pub *ppi,
-                                           uint band, s32 *, s32 *, u32 *);
-extern void wlc_phy_txpower_limit_set(struct brcms_phy_pub *ppi,
-                                     struct txpwr_limits *,
-                                     u16 chanspec);
-extern int wlc_phy_txpower_get(struct brcms_phy_pub *ppi, uint *qdbm,
-                              bool *override);
-extern int wlc_phy_txpower_set(struct brcms_phy_pub *ppi, uint qdbm,
-                              bool override);
-extern void wlc_phy_txpower_target_set(struct brcms_phy_pub *ppi,
-                                      struct txpwr_limits *);
-extern bool wlc_phy_txpower_hw_ctrl_get(struct brcms_phy_pub *ppi);
-extern void wlc_phy_txpower_hw_ctrl_set(struct brcms_phy_pub *ppi,
-                                       bool hwpwrctrl);
-extern u8 wlc_phy_txpower_get_target_min(struct brcms_phy_pub *ppi);
-extern u8 wlc_phy_txpower_get_target_max(struct brcms_phy_pub *ppi);
-extern bool wlc_phy_txpower_ipa_ison(struct brcms_phy_pub *pih);
-
-extern void wlc_phy_stf_chain_init(struct brcms_phy_pub *pih, u8 txchain,
-                                  u8 rxchain);
-extern void wlc_phy_stf_chain_set(struct brcms_phy_pub *pih, u8 txchain,
-                                 u8 rxchain);
-extern void wlc_phy_stf_chain_get(struct brcms_phy_pub *pih, u8 *txchain,
-                                 u8 *rxchain);
-extern u8 wlc_phy_stf_chain_active_get(struct brcms_phy_pub *pih);
-extern s8 wlc_phy_stf_ssmode_get(struct brcms_phy_pub *pih,
-                                u16 chanspec);
-extern void wlc_phy_ldpc_override_set(struct brcms_phy_pub *ppi, bool val);
-
-extern void wlc_phy_cal_perical(struct brcms_phy_pub *ppi, u8 reason);
-extern void wlc_phy_noise_sample_request_external(struct brcms_phy_pub *ppi);
-extern void wlc_phy_edcrs_lock(struct brcms_phy_pub *pih, bool lock);
-extern void wlc_phy_cal_papd_recal(struct brcms_phy_pub *ppi);
-
-extern void wlc_phy_ant_rxdiv_set(struct brcms_phy_pub *ppi, u8 val);
-extern void wlc_phy_clear_tssi(struct brcms_phy_pub *ppi);
-extern void wlc_phy_hold_upd(struct brcms_phy_pub *ppi, u32 id, bool val);
-extern void wlc_phy_mute_upd(struct brcms_phy_pub *ppi, bool val, u32 flags);
-
-extern void wlc_phy_antsel_type_set(struct brcms_phy_pub *ppi, u8 antsel_type);
-
-extern void wlc_phy_txpower_get_current(struct brcms_phy_pub *ppi,
-                                       struct tx_power *power, uint channel);
-
-extern void wlc_phy_initcal_enable(struct brcms_phy_pub *pih, bool initcal);
-extern bool wlc_phy_test_ison(struct brcms_phy_pub *ppi);
-extern void wlc_phy_txpwr_percent_set(struct brcms_phy_pub *ppi,
-                                     u8 txpwr_percent);
-extern void wlc_phy_ofdm_rateset_war(struct brcms_phy_pub *pih, bool war);
-extern void wlc_phy_bf_preempt_enable(struct brcms_phy_pub *pih,
-                                     bool bf_preempt);
-extern void wlc_phy_machwcap_set(struct brcms_phy_pub *ppi, u32 machwcap);
-
-extern void wlc_phy_runbist_config(struct brcms_phy_pub *ppi, bool start_end);
-
-extern void wlc_phy_freqtrack_start(struct brcms_phy_pub *ppi);
-extern void wlc_phy_freqtrack_end(struct brcms_phy_pub *ppi);
-
-extern const u8 *wlc_phy_get_ofdm_rate_lookup(void);
-
-extern s8 wlc_phy_get_tx_power_offset_by_mcs(struct brcms_phy_pub *ppi,
-                                            u8 mcs_offset);
-extern s8 wlc_phy_get_tx_power_offset(struct brcms_phy_pub *ppi, u8 tbl_offset);
+struct shared_phy *wlc_phy_shared_attach(struct shared_phy_params *shp);
+struct brcms_phy_pub *wlc_phy_attach(struct shared_phy *sh,
+                                    struct bcma_device *d11core, int bandtype,
+                                    struct wiphy *wiphy);
+void wlc_phy_detach(struct brcms_phy_pub *ppi);
+
+bool wlc_phy_get_phyversion(struct brcms_phy_pub *pih, u16 *phytype,
+                           u16 *phyrev, u16 *radioid, u16 *radiover);
+bool wlc_phy_get_encore(struct brcms_phy_pub *pih);
+u32 wlc_phy_get_coreflags(struct brcms_phy_pub *pih);
+
+void wlc_phy_hw_clk_state_upd(struct brcms_phy_pub *ppi, bool newstate);
+void wlc_phy_hw_state_upd(struct brcms_phy_pub *ppi, bool newstate);
+void wlc_phy_init(struct brcms_phy_pub *ppi, u16 chanspec);
+void wlc_phy_watchdog(struct brcms_phy_pub *ppi);
+int wlc_phy_down(struct brcms_phy_pub *ppi);
+u32 wlc_phy_clk_bwbits(struct brcms_phy_pub *pih);
+void wlc_phy_cal_init(struct brcms_phy_pub *ppi);
+void wlc_phy_antsel_init(struct brcms_phy_pub *ppi, bool lut_init);
+
+void wlc_phy_chanspec_set(struct brcms_phy_pub *ppi, u16 chanspec);
+u16 wlc_phy_chanspec_get(struct brcms_phy_pub *ppi);
+void wlc_phy_chanspec_radio_set(struct brcms_phy_pub *ppi, u16 newch);
+u16 wlc_phy_bw_state_get(struct brcms_phy_pub *ppi);
+void wlc_phy_bw_state_set(struct brcms_phy_pub *ppi, u16 bw);
+
+int wlc_phy_rssi_compute(struct brcms_phy_pub *pih, struct d11rxhdr *rxh);
+void wlc_phy_por_inform(struct brcms_phy_pub *ppi);
+void wlc_phy_noise_sample_intr(struct brcms_phy_pub *ppi);
+bool wlc_phy_bist_check_phy(struct brcms_phy_pub *ppi);
+
+void wlc_phy_set_deaf(struct brcms_phy_pub *ppi, bool user_flag);
+
+void wlc_phy_switch_radio(struct brcms_phy_pub *ppi, bool on);
+void wlc_phy_anacore(struct brcms_phy_pub *ppi, bool on);
+
+
+void wlc_phy_BSSinit(struct brcms_phy_pub *ppi, bool bonlyap, int rssi);
+
+void wlc_phy_chanspec_ch14_widefilter_set(struct brcms_phy_pub *ppi,
+                                         bool wide_filter);
+void wlc_phy_chanspec_band_validch(struct brcms_phy_pub *ppi, uint band,
+                                  struct brcms_chanvec *channels);
+u16 wlc_phy_chanspec_band_firstch(struct brcms_phy_pub *ppi, uint band);
+
+void wlc_phy_txpower_sromlimit(struct brcms_phy_pub *ppi, uint chan, u8 *_min_,
+                              u8 *_max_, int rate);
+void wlc_phy_txpower_sromlimit_max_get(struct brcms_phy_pub *ppi, uint chan,
+                                      u8 *_max_, u8 *_min_);
+void wlc_phy_txpower_boardlimit_band(struct brcms_phy_pub *ppi, uint band,
+                                    s32 *, s32 *, u32 *);
+void wlc_phy_txpower_limit_set(struct brcms_phy_pub *ppi, struct txpwr_limits *,
+                              u16 chanspec);
+int wlc_phy_txpower_get(struct brcms_phy_pub *ppi, uint *qdbm, bool *override);
+int wlc_phy_txpower_set(struct brcms_phy_pub *ppi, uint qdbm, bool override);
+void wlc_phy_txpower_target_set(struct brcms_phy_pub *ppi,
+                               struct txpwr_limits *);
+bool wlc_phy_txpower_hw_ctrl_get(struct brcms_phy_pub *ppi);
+void wlc_phy_txpower_hw_ctrl_set(struct brcms_phy_pub *ppi, bool hwpwrctrl);
+u8 wlc_phy_txpower_get_target_min(struct brcms_phy_pub *ppi);
+u8 wlc_phy_txpower_get_target_max(struct brcms_phy_pub *ppi);
+bool wlc_phy_txpower_ipa_ison(struct brcms_phy_pub *pih);
+
+void wlc_phy_stf_chain_init(struct brcms_phy_pub *pih, u8 txchain, u8 rxchain);
+void wlc_phy_stf_chain_set(struct brcms_phy_pub *pih, u8 txchain, u8 rxchain);
+void wlc_phy_stf_chain_get(struct brcms_phy_pub *pih, u8 *txchain, u8 *rxchain);
+u8 wlc_phy_stf_chain_active_get(struct brcms_phy_pub *pih);
+s8 wlc_phy_stf_ssmode_get(struct brcms_phy_pub *pih, u16 chanspec);
+void wlc_phy_ldpc_override_set(struct brcms_phy_pub *ppi, bool val);
+
+void wlc_phy_cal_perical(struct brcms_phy_pub *ppi, u8 reason);
+void wlc_phy_noise_sample_request_external(struct brcms_phy_pub *ppi);
+void wlc_phy_edcrs_lock(struct brcms_phy_pub *pih, bool lock);
+void wlc_phy_cal_papd_recal(struct brcms_phy_pub *ppi);
+
+void wlc_phy_ant_rxdiv_set(struct brcms_phy_pub *ppi, u8 val);
+void wlc_phy_clear_tssi(struct brcms_phy_pub *ppi);
+void wlc_phy_hold_upd(struct brcms_phy_pub *ppi, u32 id, bool val);
+void wlc_phy_mute_upd(struct brcms_phy_pub *ppi, bool val, u32 flags);
+
+void wlc_phy_antsel_type_set(struct brcms_phy_pub *ppi, u8 antsel_type);
+
+void wlc_phy_txpower_get_current(struct brcms_phy_pub *ppi,
+                                struct tx_power *power, uint channel);
+
+void wlc_phy_initcal_enable(struct brcms_phy_pub *pih, bool initcal);
+bool wlc_phy_test_ison(struct brcms_phy_pub *ppi);
+void wlc_phy_txpwr_percent_set(struct brcms_phy_pub *ppi, u8 txpwr_percent);
+void wlc_phy_ofdm_rateset_war(struct brcms_phy_pub *pih, bool war);
+void wlc_phy_bf_preempt_enable(struct brcms_phy_pub *pih, bool bf_preempt);
+void wlc_phy_machwcap_set(struct brcms_phy_pub *ppi, u32 machwcap);
+
+void wlc_phy_runbist_config(struct brcms_phy_pub *ppi, bool start_end);
+
+void wlc_phy_freqtrack_start(struct brcms_phy_pub *ppi);
+void wlc_phy_freqtrack_end(struct brcms_phy_pub *ppi);
+
+const u8 *wlc_phy_get_ofdm_rate_lookup(void);
+
+s8 wlc_phy_get_tx_power_offset_by_mcs(struct brcms_phy_pub *ppi,
+                                     u8 mcs_offset);
+s8 wlc_phy_get_tx_power_offset(struct brcms_phy_pub *ppi, u8 tbl_offset);
 #endif                          /* _BRCM_PHY_HAL_H_ */
index 1dc767c31653b29a9458cc08b9d113ab1c6979ad..4960f7d2680430313d42e3e7e64787a21c0f8dde 100644
@@ -910,113 +910,103 @@ struct lcnphy_radio_regs {
        u8 do_init_g;
 };
 
-extern u16 read_phy_reg(struct brcms_phy *pi, u16 addr);
-extern void write_phy_reg(struct brcms_phy *pi, u16 addr, u16 val);
-extern void and_phy_reg(struct brcms_phy *pi, u16 addr, u16 val);
-extern void or_phy_reg(struct brcms_phy *pi, u16 addr, u16 val);
-extern void mod_phy_reg(struct brcms_phy *pi, u16 addr, u16 mask, u16 val);
-
-extern u16 read_radio_reg(struct brcms_phy *pi, u16 addr);
-extern void or_radio_reg(struct brcms_phy *pi, u16 addr, u16 val);
-extern void and_radio_reg(struct brcms_phy *pi, u16 addr, u16 val);
-extern void mod_radio_reg(struct brcms_phy *pi, u16 addr, u16 mask,
-                         u16 val);
-extern void xor_radio_reg(struct brcms_phy *pi, u16 addr, u16 mask);
-
-extern void write_radio_reg(struct brcms_phy *pi, u16 addr, u16 val);
-
-extern void wlc_phyreg_enter(struct brcms_phy_pub *pih);
-extern void wlc_phyreg_exit(struct brcms_phy_pub *pih);
-extern void wlc_radioreg_enter(struct brcms_phy_pub *pih);
-extern void wlc_radioreg_exit(struct brcms_phy_pub *pih);
-
-extern void wlc_phy_read_table(struct brcms_phy *pi,
-                              const struct phytbl_info *ptbl_info,
-                              u16 tblAddr, u16 tblDataHi,
-                              u16 tblDatalo);
-extern void wlc_phy_write_table(struct brcms_phy *pi,
-                               const struct phytbl_info *ptbl_info,
-                               u16 tblAddr, u16 tblDataHi, u16 tblDatalo);
-extern void wlc_phy_table_addr(struct brcms_phy *pi, uint tbl_id,
-                              uint tbl_offset, u16 tblAddr, u16 tblDataHi,
-                              u16 tblDataLo);
-extern void wlc_phy_table_data_write(struct brcms_phy *pi, uint width, u32 val);
-
-extern void write_phy_channel_reg(struct brcms_phy *pi, uint val);
-extern void wlc_phy_txpower_update_shm(struct brcms_phy *pi);
-
-extern u8 wlc_phy_nbits(s32 value);
-extern void wlc_phy_compute_dB(u32 *cmplx_pwr, s8 *p_dB, u8 core);
-
-extern uint wlc_phy_init_radio_regs_allbands(struct brcms_phy *pi,
-                                            struct radio_20xx_regs *radioregs);
-extern uint wlc_phy_init_radio_regs(struct brcms_phy *pi,
-                                   const struct radio_regs *radioregs,
-                                   u16 core_offset);
-
-extern void wlc_phy_txpower_ipa_upd(struct brcms_phy *pi);
-
-extern void wlc_phy_do_dummy_tx(struct brcms_phy *pi, bool ofdm, bool pa_on);
-extern void wlc_phy_papd_decode_epsilon(u32 epsilon, s32 *eps_real,
-                                       s32 *eps_imag);
-
-extern void wlc_phy_cal_perical_mphase_reset(struct brcms_phy *pi);
-extern void wlc_phy_cal_perical_mphase_restart(struct brcms_phy *pi);
-
-extern bool wlc_phy_attach_nphy(struct brcms_phy *pi);
-extern bool wlc_phy_attach_lcnphy(struct brcms_phy *pi);
-
-extern void wlc_phy_detach_lcnphy(struct brcms_phy *pi);
-
-extern void wlc_phy_init_nphy(struct brcms_phy *pi);
-extern void wlc_phy_init_lcnphy(struct brcms_phy *pi);
-
-extern void wlc_phy_cal_init_nphy(struct brcms_phy *pi);
-extern void wlc_phy_cal_init_lcnphy(struct brcms_phy *pi);
-
-extern void wlc_phy_chanspec_set_nphy(struct brcms_phy *pi,
-                                     u16 chanspec);
-extern void wlc_phy_chanspec_set_lcnphy(struct brcms_phy *pi,
-                                       u16 chanspec);
-extern void wlc_phy_chanspec_set_fixup_lcnphy(struct brcms_phy *pi,
-                                             u16 chanspec);
-extern int wlc_phy_channel2freq(uint channel);
-extern int wlc_phy_chanspec_freq2bandrange_lpssn(uint);
-extern int wlc_phy_chanspec_bandrange_get(struct brcms_phy *, u16 chanspec);
-
-extern void wlc_lcnphy_set_tx_pwr_ctrl(struct brcms_phy *pi, u16 mode);
-extern s8 wlc_lcnphy_get_current_tx_pwr_idx(struct brcms_phy *pi);
-
-extern void wlc_phy_txpower_recalc_target_nphy(struct brcms_phy *pi);
-extern void wlc_lcnphy_txpower_recalc_target(struct brcms_phy *pi);
-extern void wlc_phy_txpower_recalc_target_lcnphy(struct brcms_phy *pi);
-
-extern void wlc_lcnphy_set_tx_pwr_by_index(struct brcms_phy *pi, int index);
-extern void wlc_lcnphy_tx_pu(struct brcms_phy *pi, bool bEnable);
-extern void wlc_lcnphy_stop_tx_tone(struct brcms_phy *pi);
-extern void wlc_lcnphy_start_tx_tone(struct brcms_phy *pi, s32 f_kHz,
-                                    u16 max_val, bool iqcalmode);
-
-extern void wlc_phy_txpower_sromlimit_get_nphy(struct brcms_phy *pi, uint chan,
-                                              u8 *max_pwr, u8 rate_id);
-extern void wlc_phy_ofdm_to_mcs_powers_nphy(u8 *power, u8 rate_mcs_start,
-                                           u8 rate_mcs_end,
-                                           u8 rate_ofdm_start);
-extern void wlc_phy_mcs_to_ofdm_powers_nphy(u8 *power,
-                                           u8 rate_ofdm_start,
-                                           u8 rate_ofdm_end,
-                                           u8 rate_mcs_start);
-
-extern u16 wlc_lcnphy_tempsense(struct brcms_phy *pi, bool mode);
-extern s16 wlc_lcnphy_tempsense_new(struct brcms_phy *pi, bool mode);
-extern s8 wlc_lcnphy_tempsense_degree(struct brcms_phy *pi, bool mode);
-extern s8 wlc_lcnphy_vbatsense(struct brcms_phy *pi, bool mode);
-extern void wlc_phy_carrier_suppress_lcnphy(struct brcms_phy *pi);
-extern void wlc_lcnphy_crsuprs(struct brcms_phy *pi, int channel);
-extern void wlc_lcnphy_epa_switch(struct brcms_phy *pi, bool mode);
-extern void wlc_2064_vco_cal(struct brcms_phy *pi);
-
-extern void wlc_phy_txpower_recalc_target(struct brcms_phy *pi);
+u16 read_phy_reg(struct brcms_phy *pi, u16 addr);
+void write_phy_reg(struct brcms_phy *pi, u16 addr, u16 val);
+void and_phy_reg(struct brcms_phy *pi, u16 addr, u16 val);
+void or_phy_reg(struct brcms_phy *pi, u16 addr, u16 val);
+void mod_phy_reg(struct brcms_phy *pi, u16 addr, u16 mask, u16 val);
+
+u16 read_radio_reg(struct brcms_phy *pi, u16 addr);
+void or_radio_reg(struct brcms_phy *pi, u16 addr, u16 val);
+void and_radio_reg(struct brcms_phy *pi, u16 addr, u16 val);
+void mod_radio_reg(struct brcms_phy *pi, u16 addr, u16 mask, u16 val);
+void xor_radio_reg(struct brcms_phy *pi, u16 addr, u16 mask);
+
+void write_radio_reg(struct brcms_phy *pi, u16 addr, u16 val);
+
+void wlc_phyreg_enter(struct brcms_phy_pub *pih);
+void wlc_phyreg_exit(struct brcms_phy_pub *pih);
+void wlc_radioreg_enter(struct brcms_phy_pub *pih);
+void wlc_radioreg_exit(struct brcms_phy_pub *pih);
+
+void wlc_phy_read_table(struct brcms_phy *pi,
+                       const struct phytbl_info *ptbl_info,
+                       u16 tblAddr, u16 tblDataHi, u16 tblDatalo);
+void wlc_phy_write_table(struct brcms_phy *pi,
+                        const struct phytbl_info *ptbl_info,
+                        u16 tblAddr, u16 tblDataHi, u16 tblDatalo);
+void wlc_phy_table_addr(struct brcms_phy *pi, uint tbl_id, uint tbl_offset,
+                       u16 tblAddr, u16 tblDataHi, u16 tblDataLo);
+void wlc_phy_table_data_write(struct brcms_phy *pi, uint width, u32 val);
+
+void write_phy_channel_reg(struct brcms_phy *pi, uint val);
+void wlc_phy_txpower_update_shm(struct brcms_phy *pi);
+
+u8 wlc_phy_nbits(s32 value);
+void wlc_phy_compute_dB(u32 *cmplx_pwr, s8 *p_dB, u8 core);
+
+uint wlc_phy_init_radio_regs_allbands(struct brcms_phy *pi,
+                                     struct radio_20xx_regs *radioregs);
+uint wlc_phy_init_radio_regs(struct brcms_phy *pi,
+                            const struct radio_regs *radioregs,
+                            u16 core_offset);
+
+void wlc_phy_txpower_ipa_upd(struct brcms_phy *pi);
+
+void wlc_phy_do_dummy_tx(struct brcms_phy *pi, bool ofdm, bool pa_on);
+void wlc_phy_papd_decode_epsilon(u32 epsilon, s32 *eps_real, s32 *eps_imag);
+
+void wlc_phy_cal_perical_mphase_reset(struct brcms_phy *pi);
+void wlc_phy_cal_perical_mphase_restart(struct brcms_phy *pi);
+
+bool wlc_phy_attach_nphy(struct brcms_phy *pi);
+bool wlc_phy_attach_lcnphy(struct brcms_phy *pi);
+
+void wlc_phy_detach_lcnphy(struct brcms_phy *pi);
+
+void wlc_phy_init_nphy(struct brcms_phy *pi);
+void wlc_phy_init_lcnphy(struct brcms_phy *pi);
+
+void wlc_phy_cal_init_nphy(struct brcms_phy *pi);
+void wlc_phy_cal_init_lcnphy(struct brcms_phy *pi);
+
+void wlc_phy_chanspec_set_nphy(struct brcms_phy *pi, u16 chanspec);
+void wlc_phy_chanspec_set_lcnphy(struct brcms_phy *pi, u16 chanspec);
+void wlc_phy_chanspec_set_fixup_lcnphy(struct brcms_phy *pi, u16 chanspec);
+int wlc_phy_channel2freq(uint channel);
+int wlc_phy_chanspec_freq2bandrange_lpssn(uint);
+int wlc_phy_chanspec_bandrange_get(struct brcms_phy *, u16 chanspec);
+
+void wlc_lcnphy_set_tx_pwr_ctrl(struct brcms_phy *pi, u16 mode);
+s8 wlc_lcnphy_get_current_tx_pwr_idx(struct brcms_phy *pi);
+
+void wlc_phy_txpower_recalc_target_nphy(struct brcms_phy *pi);
+void wlc_lcnphy_txpower_recalc_target(struct brcms_phy *pi);
+void wlc_phy_txpower_recalc_target_lcnphy(struct brcms_phy *pi);
+
+void wlc_lcnphy_set_tx_pwr_by_index(struct brcms_phy *pi, int index);
+void wlc_lcnphy_tx_pu(struct brcms_phy *pi, bool bEnable);
+void wlc_lcnphy_stop_tx_tone(struct brcms_phy *pi);
+void wlc_lcnphy_start_tx_tone(struct brcms_phy *pi, s32 f_kHz, u16 max_val,
+                             bool iqcalmode);
+
+void wlc_phy_txpower_sromlimit_get_nphy(struct brcms_phy *pi, uint chan,
+                                       u8 *max_pwr, u8 rate_id);
+void wlc_phy_ofdm_to_mcs_powers_nphy(u8 *power, u8 rate_mcs_start,
+                                    u8 rate_mcs_end, u8 rate_ofdm_start);
+void wlc_phy_mcs_to_ofdm_powers_nphy(u8 *power, u8 rate_ofdm_start,
+                                    u8 rate_ofdm_end, u8 rate_mcs_start);
+
+u16 wlc_lcnphy_tempsense(struct brcms_phy *pi, bool mode);
+s16 wlc_lcnphy_tempsense_new(struct brcms_phy *pi, bool mode);
+s8 wlc_lcnphy_tempsense_degree(struct brcms_phy *pi, bool mode);
+s8 wlc_lcnphy_vbatsense(struct brcms_phy *pi, bool mode);
+void wlc_phy_carrier_suppress_lcnphy(struct brcms_phy *pi);
+void wlc_lcnphy_crsuprs(struct brcms_phy *pi, int channel);
+void wlc_lcnphy_epa_switch(struct brcms_phy *pi, bool mode);
+void wlc_2064_vco_cal(struct brcms_phy *pi);
+
+void wlc_phy_txpower_recalc_target(struct brcms_phy *pi);
 
 #define LCNPHY_TBL_ID_PAPDCOMPDELTATBL 0x18
 #define LCNPHY_TX_POWER_TABLE_SIZE     128
@@ -1030,26 +1020,24 @@ extern void wlc_phy_txpower_recalc_target(struct brcms_phy *pi);
 
 #define LCNPHY_TX_PWR_CTRL_TEMPBASED   0xE001
 
-extern void wlc_lcnphy_write_table(struct brcms_phy *pi,
-                                  const struct phytbl_info *pti);
-extern void wlc_lcnphy_read_table(struct brcms_phy *pi,
-                                 struct phytbl_info *pti);
-extern void wlc_lcnphy_set_tx_iqcc(struct brcms_phy *pi, u16 a, u16 b);
-extern void wlc_lcnphy_set_tx_locc(struct brcms_phy *pi, u16 didq);
-extern void wlc_lcnphy_get_tx_iqcc(struct brcms_phy *pi, u16 *a, u16 *b);
-extern u16 wlc_lcnphy_get_tx_locc(struct brcms_phy *pi);
-extern void wlc_lcnphy_get_radio_loft(struct brcms_phy *pi, u8 *ei0,
-                                     u8 *eq0, u8 *fi0, u8 *fq0);
-extern void wlc_lcnphy_calib_modes(struct brcms_phy *pi, uint mode);
-extern void wlc_lcnphy_deaf_mode(struct brcms_phy *pi, bool mode);
-extern bool wlc_phy_tpc_isenabled_lcnphy(struct brcms_phy *pi);
-extern void wlc_lcnphy_tx_pwr_update_npt(struct brcms_phy *pi);
-extern s32 wlc_lcnphy_tssi2dbm(s32 tssi, s32 a1, s32 b0, s32 b1);
-extern void wlc_lcnphy_get_tssi(struct brcms_phy *pi, s8 *ofdm_pwr,
-                               s8 *cck_pwr);
-extern void wlc_lcnphy_tx_power_adjustment(struct brcms_phy_pub *ppi);
-
-extern s32 wlc_lcnphy_rx_signal_power(struct brcms_phy *pi, s32 gain_index);
+void wlc_lcnphy_write_table(struct brcms_phy *pi,
+                           const struct phytbl_info *pti);
+void wlc_lcnphy_read_table(struct brcms_phy *pi, struct phytbl_info *pti);
+void wlc_lcnphy_set_tx_iqcc(struct brcms_phy *pi, u16 a, u16 b);
+void wlc_lcnphy_set_tx_locc(struct brcms_phy *pi, u16 didq);
+void wlc_lcnphy_get_tx_iqcc(struct brcms_phy *pi, u16 *a, u16 *b);
+u16 wlc_lcnphy_get_tx_locc(struct brcms_phy *pi);
+void wlc_lcnphy_get_radio_loft(struct brcms_phy *pi, u8 *ei0, u8 *eq0, u8 *fi0,
+                              u8 *fq0);
+void wlc_lcnphy_calib_modes(struct brcms_phy *pi, uint mode);
+void wlc_lcnphy_deaf_mode(struct brcms_phy *pi, bool mode);
+bool wlc_phy_tpc_isenabled_lcnphy(struct brcms_phy *pi);
+void wlc_lcnphy_tx_pwr_update_npt(struct brcms_phy *pi);
+s32 wlc_lcnphy_tssi2dbm(s32 tssi, s32 a1, s32 b0, s32 b1);
+void wlc_lcnphy_get_tssi(struct brcms_phy *pi, s8 *ofdm_pwr, s8 *cck_pwr);
+void wlc_lcnphy_tx_power_adjustment(struct brcms_phy_pub *ppi);
+
+s32 wlc_lcnphy_rx_signal_power(struct brcms_phy *pi, s32 gain_index);
 
 #define NPHY_MAX_HPVGA1_INDEX          10
 #define NPHY_DEF_HPVGA1_INDEXLIMIT     7
@@ -1060,9 +1048,8 @@ struct phy_iq_est {
        u32 q_pwr;
 };
 
-extern void wlc_phy_stay_in_carriersearch_nphy(struct brcms_phy *pi,
-                                              bool enable);
-extern void wlc_nphy_deaf_mode(struct brcms_phy *pi, bool mode);
+void wlc_phy_stay_in_carriersearch_nphy(struct brcms_phy *pi, bool enable);
+void wlc_nphy_deaf_mode(struct brcms_phy *pi, bool mode);
 
 #define wlc_phy_write_table_nphy(pi, pti) \
        wlc_phy_write_table(pi, pti, 0x72, 0x74, 0x73)
@@ -1076,10 +1063,10 @@ extern void wlc_nphy_deaf_mode(struct brcms_phy *pi, bool mode);
 #define wlc_nphy_table_data_write(pi, w, v) \
        wlc_phy_table_data_write((pi), (w), (v))
 
-extern void wlc_phy_table_read_nphy(struct brcms_phy *pi, u32, u32 l, u32 o,
-                                   u32 w, void *d);
-extern void wlc_phy_table_write_nphy(struct brcms_phy *pi, u32, u32, u32,
-                                    u32, const void *);
+void wlc_phy_table_read_nphy(struct brcms_phy *pi, u32, u32 l, u32 o, u32 w,
+                            void *d);
+void wlc_phy_table_write_nphy(struct brcms_phy *pi, u32, u32, u32, u32,
+                             const void *);
 
 #define        PHY_IPA(pi) \
        ((pi->ipa2g_on && CHSPEC_IS2G(pi->radio_chanspec)) || \
@@ -1089,73 +1076,67 @@ extern void wlc_phy_table_write_nphy(struct brcms_phy *pi, u32, u32, u32,
        if (NREV_LT((pi)->pubpi.phy_rev, 3)) \
                (void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol))
 
-extern void wlc_phy_cal_perical_nphy_run(struct brcms_phy *pi, u8 caltype);
-extern void wlc_phy_aci_reset_nphy(struct brcms_phy *pi);
-extern void wlc_phy_pa_override_nphy(struct brcms_phy *pi, bool en);
-
-extern u8 wlc_phy_get_chan_freq_range_nphy(struct brcms_phy *pi, uint chan);
-extern void wlc_phy_switch_radio_nphy(struct brcms_phy *pi, bool on);
-
-extern void wlc_phy_stf_chain_upd_nphy(struct brcms_phy *pi);
-
-extern void wlc_phy_force_rfseq_nphy(struct brcms_phy *pi, u8 cmd);
-extern s16 wlc_phy_tempsense_nphy(struct brcms_phy *pi);
-
-extern u16 wlc_phy_classifier_nphy(struct brcms_phy *pi, u16 mask, u16 val);
-
-extern void wlc_phy_rx_iq_est_nphy(struct brcms_phy *pi, struct phy_iq_est *est,
-                                  u16 num_samps, u8 wait_time,
-                                  u8 wait_for_crs);
-
-extern void wlc_phy_rx_iq_coeffs_nphy(struct brcms_phy *pi, u8 write,
-                                     struct nphy_iq_comp *comp);
-extern void wlc_phy_aci_and_noise_reduction_nphy(struct brcms_phy *pi);
-
-extern void wlc_phy_rxcore_setstate_nphy(struct brcms_phy_pub *pih,
-                                        u8 rxcore_bitmask);
-extern u8 wlc_phy_rxcore_getstate_nphy(struct brcms_phy_pub *pih);
-
-extern void wlc_phy_txpwrctrl_enable_nphy(struct brcms_phy *pi, u8 ctrl_type);
-extern void wlc_phy_txpwr_fixpower_nphy(struct brcms_phy *pi);
-extern void wlc_phy_txpwr_apply_nphy(struct brcms_phy *pi);
-extern void wlc_phy_txpwr_papd_cal_nphy(struct brcms_phy *pi);
-extern u16 wlc_phy_txpwr_idx_get_nphy(struct brcms_phy *pi);
-
-extern struct nphy_txgains wlc_phy_get_tx_gain_nphy(struct brcms_phy *pi);
-extern int wlc_phy_cal_txiqlo_nphy(struct brcms_phy *pi,
-                                  struct nphy_txgains target_gain,
-                                  bool full, bool m);
-extern int wlc_phy_cal_rxiq_nphy(struct brcms_phy *pi,
-                                struct nphy_txgains target_gain,
-                                u8 type, bool d);
-extern void wlc_phy_txpwr_index_nphy(struct brcms_phy *pi, u8 core_mask,
-                                    s8 txpwrindex, bool res);
-extern void wlc_phy_rssisel_nphy(struct brcms_phy *pi, u8 core, u8 rssi_type);
-extern int wlc_phy_poll_rssi_nphy(struct brcms_phy *pi, u8 rssi_type,
-                                 s32 *rssi_buf, u8 nsamps);
-extern void wlc_phy_rssi_cal_nphy(struct brcms_phy *pi);
-extern int wlc_phy_aci_scan_nphy(struct brcms_phy *pi);
-extern void wlc_phy_cal_txgainctrl_nphy(struct brcms_phy *pi,
-                                       s32 dBm_targetpower, bool debug);
-extern int wlc_phy_tx_tone_nphy(struct brcms_phy *pi, u32 f_kHz, u16 max_val,
-                               u8 mode, u8, bool);
-extern void wlc_phy_stopplayback_nphy(struct brcms_phy *pi);
-extern void wlc_phy_est_tonepwr_nphy(struct brcms_phy *pi, s32 *qdBm_pwrbuf,
-                                    u8 num_samps);
-extern void wlc_phy_radio205x_vcocal_nphy(struct brcms_phy *pi);
-
-extern int wlc_phy_rssi_compute_nphy(struct brcms_phy *pi,
-                                    struct d11rxhdr *rxh);
+void wlc_phy_cal_perical_nphy_run(struct brcms_phy *pi, u8 caltype);
+void wlc_phy_aci_reset_nphy(struct brcms_phy *pi);
+void wlc_phy_pa_override_nphy(struct brcms_phy *pi, bool en);
+
+u8 wlc_phy_get_chan_freq_range_nphy(struct brcms_phy *pi, uint chan);
+void wlc_phy_switch_radio_nphy(struct brcms_phy *pi, bool on);
+
+void wlc_phy_stf_chain_upd_nphy(struct brcms_phy *pi);
+
+void wlc_phy_force_rfseq_nphy(struct brcms_phy *pi, u8 cmd);
+s16 wlc_phy_tempsense_nphy(struct brcms_phy *pi);
+
+u16 wlc_phy_classifier_nphy(struct brcms_phy *pi, u16 mask, u16 val);
+
+void wlc_phy_rx_iq_est_nphy(struct brcms_phy *pi, struct phy_iq_est *est,
+                           u16 num_samps, u8 wait_time, u8 wait_for_crs);
+
+void wlc_phy_rx_iq_coeffs_nphy(struct brcms_phy *pi, u8 write,
+                              struct nphy_iq_comp *comp);
+void wlc_phy_aci_and_noise_reduction_nphy(struct brcms_phy *pi);
+
+void wlc_phy_rxcore_setstate_nphy(struct brcms_phy_pub *pih, u8 rxcore_bitmask);
+u8 wlc_phy_rxcore_getstate_nphy(struct brcms_phy_pub *pih);
+
+void wlc_phy_txpwrctrl_enable_nphy(struct brcms_phy *pi, u8 ctrl_type);
+void wlc_phy_txpwr_fixpower_nphy(struct brcms_phy *pi);
+void wlc_phy_txpwr_apply_nphy(struct brcms_phy *pi);
+void wlc_phy_txpwr_papd_cal_nphy(struct brcms_phy *pi);
+u16 wlc_phy_txpwr_idx_get_nphy(struct brcms_phy *pi);
+
+struct nphy_txgains wlc_phy_get_tx_gain_nphy(struct brcms_phy *pi);
+int wlc_phy_cal_txiqlo_nphy(struct brcms_phy *pi,
+                           struct nphy_txgains target_gain, bool full, bool m);
+int wlc_phy_cal_rxiq_nphy(struct brcms_phy *pi, struct nphy_txgains target_gain,
+                         u8 type, bool d);
+void wlc_phy_txpwr_index_nphy(struct brcms_phy *pi, u8 core_mask,
+                             s8 txpwrindex, bool res);
+void wlc_phy_rssisel_nphy(struct brcms_phy *pi, u8 core, u8 rssi_type);
+int wlc_phy_poll_rssi_nphy(struct brcms_phy *pi, u8 rssi_type,
+                          s32 *rssi_buf, u8 nsamps);
+void wlc_phy_rssi_cal_nphy(struct brcms_phy *pi);
+int wlc_phy_aci_scan_nphy(struct brcms_phy *pi);
+void wlc_phy_cal_txgainctrl_nphy(struct brcms_phy *pi, s32 dBm_targetpower,
+                                bool debug);
+int wlc_phy_tx_tone_nphy(struct brcms_phy *pi, u32 f_kHz, u16 max_val, u8 mode,
+                        u8, bool);
+void wlc_phy_stopplayback_nphy(struct brcms_phy *pi);
+void wlc_phy_est_tonepwr_nphy(struct brcms_phy *pi, s32 *qdBm_pwrbuf,
+                             u8 num_samps);
+void wlc_phy_radio205x_vcocal_nphy(struct brcms_phy *pi);
+
+int wlc_phy_rssi_compute_nphy(struct brcms_phy *pi, struct d11rxhdr *rxh);
 
 #define NPHY_TESTPATTERN_BPHY_EVM   0
 #define NPHY_TESTPATTERN_BPHY_RFCS  1
 
-extern void wlc_phy_nphy_tkip_rifs_war(struct brcms_phy *pi, u8 rifs);
+void wlc_phy_nphy_tkip_rifs_war(struct brcms_phy *pi, u8 rifs);
 
 void wlc_phy_get_pwrdet_offsets(struct brcms_phy *pi, s8 *cckoffset,
                                s8 *ofdmoffset);
-extern s8 wlc_phy_upd_rssi_offset(struct brcms_phy *pi, s8 rssi,
-                                 u16 chanspec);
+s8 wlc_phy_upd_rssi_offset(struct brcms_phy *pi, s8 rssi, u16 chanspec);
 
-extern bool wlc_phy_n_txpower_ipa_ison(struct brcms_phy *pih);
+bool wlc_phy_n_txpower_ipa_ison(struct brcms_phy *pih);
 #endif                         /* _BRCM_PHY_INT_H_ */
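
A note on the conversion above (repeated in the brcms and brcmu headers that follow): in C the `extern` storage class is implicit on function declarations, so dropping it changes neither linkage nor generated code; it is purely a style cleanup that shortens the prototypes. A minimal sketch with a hypothetical header and function name:

/* hypothetical header: the two prototypes declare the very same function */
#ifndef EXAMPLE_PHY_H
#define EXAMPLE_PHY_H

extern int example_channel2freq(unsigned int channel);	/* old style */
int example_channel2freq(unsigned int channel);		/* equivalent, preferred */

#endif /* EXAMPLE_PHY_H */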
index 2c5b66b75970939ba6ee0ffe2b709faecbc3428e..dd8774717adee148134cb85229f422c72fa143e2 100644 (file)
 
 struct brcms_phy;
 
-extern struct phy_shim_info *wlc_phy_shim_attach(struct brcms_hardware *wlc_hw,
-                                                struct brcms_info *wl,
-                                                struct brcms_c_info *wlc);
-extern void wlc_phy_shim_detach(struct phy_shim_info *physhim);
+struct phy_shim_info *wlc_phy_shim_attach(struct brcms_hardware *wlc_hw,
+                                         struct brcms_info *wl,
+                                         struct brcms_c_info *wlc);
+void wlc_phy_shim_detach(struct phy_shim_info *physhim);
 
 /* PHY to WL utility functions */
-extern struct wlapi_timer *wlapi_init_timer(struct phy_shim_info *physhim,
-                                           void (*fn) (struct brcms_phy *pi),
-                                           void *arg, const char *name);
-extern void wlapi_free_timer(struct wlapi_timer *t);
-extern void wlapi_add_timer(struct wlapi_timer *t, uint ms, int periodic);
-extern bool wlapi_del_timer(struct wlapi_timer *t);
-extern void wlapi_intrson(struct phy_shim_info *physhim);
-extern u32 wlapi_intrsoff(struct phy_shim_info *physhim);
-extern void wlapi_intrsrestore(struct phy_shim_info *physhim,
-                              u32 macintmask);
-
-extern void wlapi_bmac_write_shm(struct phy_shim_info *physhim, uint offset,
-                                u16 v);
-extern u16 wlapi_bmac_read_shm(struct phy_shim_info *physhim, uint offset);
-extern void wlapi_bmac_mhf(struct phy_shim_info *physhim, u8 idx,
-                          u16 mask, u16 val, int bands);
-extern void wlapi_bmac_corereset(struct phy_shim_info *physhim, u32 flags);
-extern void wlapi_suspend_mac_and_wait(struct phy_shim_info *physhim);
-extern void wlapi_switch_macfreq(struct phy_shim_info *physhim, u8 spurmode);
-extern void wlapi_enable_mac(struct phy_shim_info *physhim);
-extern void wlapi_bmac_mctrl(struct phy_shim_info *physhim, u32 mask,
-                            u32 val);
-extern void wlapi_bmac_phy_reset(struct phy_shim_info *physhim);
-extern void wlapi_bmac_bw_set(struct phy_shim_info *physhim, u16 bw);
-extern void wlapi_bmac_phyclk_fgc(struct phy_shim_info *physhim, bool clk);
-extern void wlapi_bmac_macphyclk_set(struct phy_shim_info *physhim, bool clk);
-extern void wlapi_bmac_core_phypll_ctl(struct phy_shim_info *physhim, bool on);
-extern void wlapi_bmac_core_phypll_reset(struct phy_shim_info *physhim);
-extern void wlapi_bmac_ucode_wake_override_phyreg_set(struct phy_shim_info *
-                                                     physhim);
-extern void wlapi_bmac_ucode_wake_override_phyreg_clear(struct phy_shim_info *
-                                                       physhim);
-extern void wlapi_bmac_write_template_ram(struct phy_shim_info *physhim, int o,
-                                         int len, void *buf);
-extern u16 wlapi_bmac_rate_shm_offset(struct phy_shim_info *physhim,
-                                        u8 rate);
-extern void wlapi_ucode_sample_init(struct phy_shim_info *physhim);
-extern void wlapi_copyfrom_objmem(struct phy_shim_info *physhim, uint,
-                                 void *buf, int, u32 sel);
-extern void wlapi_copyto_objmem(struct phy_shim_info *physhim, uint,
-                               const void *buf, int, u32);
-
-extern void wlapi_high_update_phy_mode(struct phy_shim_info *physhim,
-                                      u32 phy_mode);
-extern u16 wlapi_bmac_get_txant(struct phy_shim_info *physhim);
+struct wlapi_timer *wlapi_init_timer(struct phy_shim_info *physhim,
+                                    void (*fn)(struct brcms_phy *pi),
+                                    void *arg, const char *name);
+void wlapi_free_timer(struct wlapi_timer *t);
+void wlapi_add_timer(struct wlapi_timer *t, uint ms, int periodic);
+bool wlapi_del_timer(struct wlapi_timer *t);
+void wlapi_intrson(struct phy_shim_info *physhim);
+u32 wlapi_intrsoff(struct phy_shim_info *physhim);
+void wlapi_intrsrestore(struct phy_shim_info *physhim, u32 macintmask);
+
+void wlapi_bmac_write_shm(struct phy_shim_info *physhim, uint offset, u16 v);
+u16 wlapi_bmac_read_shm(struct phy_shim_info *physhim, uint offset);
+void wlapi_bmac_mhf(struct phy_shim_info *physhim, u8 idx, u16 mask, u16 val,
+                   int bands);
+void wlapi_bmac_corereset(struct phy_shim_info *physhim, u32 flags);
+void wlapi_suspend_mac_and_wait(struct phy_shim_info *physhim);
+void wlapi_switch_macfreq(struct phy_shim_info *physhim, u8 spurmode);
+void wlapi_enable_mac(struct phy_shim_info *physhim);
+void wlapi_bmac_mctrl(struct phy_shim_info *physhim, u32 mask, u32 val);
+void wlapi_bmac_phy_reset(struct phy_shim_info *physhim);
+void wlapi_bmac_bw_set(struct phy_shim_info *physhim, u16 bw);
+void wlapi_bmac_phyclk_fgc(struct phy_shim_info *physhim, bool clk);
+void wlapi_bmac_macphyclk_set(struct phy_shim_info *physhim, bool clk);
+void wlapi_bmac_core_phypll_ctl(struct phy_shim_info *physhim, bool on);
+void wlapi_bmac_core_phypll_reset(struct phy_shim_info *physhim);
+void wlapi_bmac_ucode_wake_override_phyreg_set(struct phy_shim_info *physhim);
+void wlapi_bmac_ucode_wake_override_phyreg_clear(struct phy_shim_info *physhim);
+void wlapi_bmac_write_template_ram(struct phy_shim_info *physhim, int o,
+                                  int len, void *buf);
+u16 wlapi_bmac_rate_shm_offset(struct phy_shim_info *physhim, u8 rate);
+void wlapi_ucode_sample_init(struct phy_shim_info *physhim);
+void wlapi_copyfrom_objmem(struct phy_shim_info *physhim, uint, void *buf,
+                          int, u32 sel);
+void wlapi_copyto_objmem(struct phy_shim_info *physhim, uint, const void *buf,
+                        int, u32);
+
+void wlapi_high_update_phy_mode(struct phy_shim_info *physhim, u32 phy_mode);
+u16 wlapi_bmac_get_txant(struct phy_shim_info *physhim);
 
 #endif                         /* _BRCM_PHY_SHIM_H_ */
index 20e2012d5a3a2d2d83c8b7544f6828d14d3f6f33..a014bbc4f93555cc789bbc337adc36491da12902 100644 (file)
@@ -20,7 +20,7 @@
 
 #include "types.h"
 
-extern u16 si_pmu_fast_pwrup_delay(struct si_pub *sih);
-extern u32 si_pmu_measure_alpclk(struct si_pub *sih);
+u16 si_pmu_fast_pwrup_delay(struct si_pub *sih);
+u32 si_pmu_measure_alpclk(struct si_pub *sih);
 
 #endif /* _BRCM_PMU_H_ */
index d36ea5e1cc494231e56dd18bb2f0dadc65f4304d..4da38cb4f31854a60878c5da31c22cf4e39980b6 100644 (file)
@@ -266,83 +266,76 @@ struct brcms_antselcfg {
 };
 
 /* common functions for every port */
-extern struct brcms_c_info *
-brcms_c_attach(struct brcms_info *wl, struct bcma_device *core, uint unit,
-              bool piomode, uint *perr);
-extern uint brcms_c_detach(struct brcms_c_info *wlc);
-extern int brcms_c_up(struct brcms_c_info *wlc);
-extern uint brcms_c_down(struct brcms_c_info *wlc);
-
-extern bool brcms_c_chipmatch(struct bcma_device *core);
-extern void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx);
-extern void brcms_c_reset(struct brcms_c_info *wlc);
-
-extern void brcms_c_intrson(struct brcms_c_info *wlc);
-extern u32 brcms_c_intrsoff(struct brcms_c_info *wlc);
-extern void brcms_c_intrsrestore(struct brcms_c_info *wlc, u32 macintmask);
-extern bool brcms_c_intrsupd(struct brcms_c_info *wlc);
-extern bool brcms_c_isr(struct brcms_c_info *wlc);
-extern bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded);
-extern bool brcms_c_sendpkt_mac80211(struct brcms_c_info *wlc,
-                                    struct sk_buff *sdu,
-                                    struct ieee80211_hw *hw);
-extern bool brcms_c_aggregatable(struct brcms_c_info *wlc, u8 tid);
-extern void brcms_c_protection_upd(struct brcms_c_info *wlc, uint idx,
-                                  int val);
-extern int brcms_c_get_header_len(void);
-extern void brcms_c_set_addrmatch(struct brcms_c_info *wlc,
-                                 int match_reg_offset,
-                                 const u8 *addr);
-extern void brcms_c_wme_setparams(struct brcms_c_info *wlc, u16 aci,
-                             const struct ieee80211_tx_queue_params *arg,
-                             bool suspend);
-extern struct brcms_pub *brcms_c_pub(struct brcms_c_info *wlc);
-extern void brcms_c_ampdu_flush(struct brcms_c_info *wlc,
-                           struct ieee80211_sta *sta, u16 tid);
-extern void brcms_c_ampdu_tx_operational(struct brcms_c_info *wlc, u8 tid,
-                                        u8 ba_wsize, uint max_rx_ampdu_bytes);
-extern int brcms_c_module_register(struct brcms_pub *pub,
-                                  const char *name, struct brcms_info *hdl,
-                                  int (*down_fn)(void *handle));
-extern int brcms_c_module_unregister(struct brcms_pub *pub, const char *name,
-                                    struct brcms_info *hdl);
-extern void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc);
-extern void brcms_c_enable_mac(struct brcms_c_info *wlc);
-extern void brcms_c_associate_upd(struct brcms_c_info *wlc, bool state);
-extern void brcms_c_scan_start(struct brcms_c_info *wlc);
-extern void brcms_c_scan_stop(struct brcms_c_info *wlc);
-extern int brcms_c_get_curband(struct brcms_c_info *wlc);
-extern int brcms_c_set_channel(struct brcms_c_info *wlc, u16 channel);
-extern int brcms_c_set_rate_limit(struct brcms_c_info *wlc, u16 srl, u16 lrl);
-extern void brcms_c_get_current_rateset(struct brcms_c_info *wlc,
+struct brcms_c_info *brcms_c_attach(struct brcms_info *wl,
+                                   struct bcma_device *core, uint unit,
+                                   bool piomode, uint *perr);
+uint brcms_c_detach(struct brcms_c_info *wlc);
+int brcms_c_up(struct brcms_c_info *wlc);
+uint brcms_c_down(struct brcms_c_info *wlc);
+
+bool brcms_c_chipmatch(struct bcma_device *core);
+void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx);
+void brcms_c_reset(struct brcms_c_info *wlc);
+
+void brcms_c_intrson(struct brcms_c_info *wlc);
+u32 brcms_c_intrsoff(struct brcms_c_info *wlc);
+void brcms_c_intrsrestore(struct brcms_c_info *wlc, u32 macintmask);
+bool brcms_c_intrsupd(struct brcms_c_info *wlc);
+bool brcms_c_isr(struct brcms_c_info *wlc);
+bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded);
+bool brcms_c_sendpkt_mac80211(struct brcms_c_info *wlc, struct sk_buff *sdu,
+                             struct ieee80211_hw *hw);
+bool brcms_c_aggregatable(struct brcms_c_info *wlc, u8 tid);
+void brcms_c_protection_upd(struct brcms_c_info *wlc, uint idx, int val);
+int brcms_c_get_header_len(void);
+void brcms_c_set_addrmatch(struct brcms_c_info *wlc, int match_reg_offset,
+                          const u8 *addr);
+void brcms_c_wme_setparams(struct brcms_c_info *wlc, u16 aci,
+                          const struct ieee80211_tx_queue_params *arg,
+                          bool suspend);
+struct brcms_pub *brcms_c_pub(struct brcms_c_info *wlc);
+void brcms_c_ampdu_flush(struct brcms_c_info *wlc, struct ieee80211_sta *sta,
+                        u16 tid);
+void brcms_c_ampdu_tx_operational(struct brcms_c_info *wlc, u8 tid,
+                                 u8 ba_wsize, uint max_rx_ampdu_bytes);
+int brcms_c_module_register(struct brcms_pub *pub, const char *name,
+                           struct brcms_info *hdl,
+                           int (*down_fn)(void *handle));
+int brcms_c_module_unregister(struct brcms_pub *pub, const char *name,
+                             struct brcms_info *hdl);
+void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc);
+void brcms_c_enable_mac(struct brcms_c_info *wlc);
+void brcms_c_associate_upd(struct brcms_c_info *wlc, bool state);
+void brcms_c_scan_start(struct brcms_c_info *wlc);
+void brcms_c_scan_stop(struct brcms_c_info *wlc);
+int brcms_c_get_curband(struct brcms_c_info *wlc);
+int brcms_c_set_channel(struct brcms_c_info *wlc, u16 channel);
+int brcms_c_set_rate_limit(struct brcms_c_info *wlc, u16 srl, u16 lrl);
+void brcms_c_get_current_rateset(struct brcms_c_info *wlc,
                                 struct brcm_rateset *currs);
-extern int brcms_c_set_rateset(struct brcms_c_info *wlc,
-                                       struct brcm_rateset *rs);
-extern int brcms_c_set_beacon_period(struct brcms_c_info *wlc, u16 period);
-extern u16 brcms_c_get_phy_type(struct brcms_c_info *wlc, int phyidx);
-extern void brcms_c_set_shortslot_override(struct brcms_c_info *wlc,
+int brcms_c_set_rateset(struct brcms_c_info *wlc, struct brcm_rateset *rs);
+int brcms_c_set_beacon_period(struct brcms_c_info *wlc, u16 period);
+u16 brcms_c_get_phy_type(struct brcms_c_info *wlc, int phyidx);
+void brcms_c_set_shortslot_override(struct brcms_c_info *wlc,
                                    s8 sslot_override);
-extern void brcms_c_set_beacon_listen_interval(struct brcms_c_info *wlc,
-                                       u8 interval);
-extern u64 brcms_c_tsf_get(struct brcms_c_info *wlc);
-extern void brcms_c_tsf_set(struct brcms_c_info *wlc, u64 tsf);
-extern int brcms_c_set_tx_power(struct brcms_c_info *wlc, int txpwr);
-extern int brcms_c_get_tx_power(struct brcms_c_info *wlc);
-extern bool brcms_c_check_radio_disabled(struct brcms_c_info *wlc);
-extern void brcms_c_mute(struct brcms_c_info *wlc, bool on);
-extern bool brcms_c_tx_flush_completed(struct brcms_c_info *wlc);
-extern void brcms_c_start_station(struct brcms_c_info *wlc, u8 *addr);
-extern void brcms_c_start_ap(struct brcms_c_info *wlc, u8 *addr,
-                            const u8 *bssid, u8 *ssid, size_t ssid_len);
-extern void brcms_c_start_adhoc(struct brcms_c_info *wlc, u8 *addr);
-extern void brcms_c_update_beacon(struct brcms_c_info *wlc);
-extern void brcms_c_set_new_beacon(struct brcms_c_info *wlc,
-                                  struct sk_buff *beacon, u16 tim_offset,
-                                  u16 dtim_period);
-extern void brcms_c_set_new_probe_resp(struct brcms_c_info *wlc,
-                                      struct sk_buff *probe_resp);
-extern void brcms_c_enable_probe_resp(struct brcms_c_info *wlc, bool enable);
-extern void brcms_c_set_ssid(struct brcms_c_info *wlc, u8 *ssid,
-                            size_t ssid_len);
+void brcms_c_set_beacon_listen_interval(struct brcms_c_info *wlc, u8 interval);
+u64 brcms_c_tsf_get(struct brcms_c_info *wlc);
+void brcms_c_tsf_set(struct brcms_c_info *wlc, u64 tsf);
+int brcms_c_set_tx_power(struct brcms_c_info *wlc, int txpwr);
+int brcms_c_get_tx_power(struct brcms_c_info *wlc);
+bool brcms_c_check_radio_disabled(struct brcms_c_info *wlc);
+void brcms_c_mute(struct brcms_c_info *wlc, bool on);
+bool brcms_c_tx_flush_completed(struct brcms_c_info *wlc);
+void brcms_c_start_station(struct brcms_c_info *wlc, u8 *addr);
+void brcms_c_start_ap(struct brcms_c_info *wlc, u8 *addr, const u8 *bssid,
+                     u8 *ssid, size_t ssid_len);
+void brcms_c_start_adhoc(struct brcms_c_info *wlc, u8 *addr);
+void brcms_c_update_beacon(struct brcms_c_info *wlc);
+void brcms_c_set_new_beacon(struct brcms_c_info *wlc, struct sk_buff *beacon,
+                           u16 tim_offset, u16 dtim_period);
+void brcms_c_set_new_probe_resp(struct brcms_c_info *wlc,
+                               struct sk_buff *probe_resp);
+void brcms_c_enable_probe_resp(struct brcms_c_info *wlc, bool enable);
+void brcms_c_set_ssid(struct brcms_c_info *wlc, u8 *ssid, size_t ssid_len);
 
 #endif                         /* _BRCM_PUB_H_ */
index 980d578825cc0b3b131995bfcf6926bbab0bbbf1..5bb88b78ed648407dbbcb39db73d86ea503d76c9 100644 (file)
@@ -216,34 +216,30 @@ static inline u8 cck_phy2mac_rate(u8 signal)
 
 /* sanitize, and sort a rateset with the basic bit(s) preserved, validate
  * rateset */
-extern bool
-brcms_c_rate_hwrs_filter_sort_validate(struct brcms_c_rateset *rs,
-                                      const struct brcms_c_rateset *hw_rs,
-                                      bool check_brate, u8 txstreams);
+bool brcms_c_rate_hwrs_filter_sort_validate(struct brcms_c_rateset *rs,
+                                           const struct brcms_c_rateset *hw_rs,
+                                           bool check_brate, u8 txstreams);
 /* copy rateset src to dst as-is (no masking or sorting) */
-extern void brcms_c_rateset_copy(const struct brcms_c_rateset *src,
-                            struct brcms_c_rateset *dst);
+void brcms_c_rateset_copy(const struct brcms_c_rateset *src,
+                         struct brcms_c_rateset *dst);
 
 /* would be nice to have these documented ... */
-extern u32 brcms_c_compute_rspec(struct d11rxhdr *rxh, u8 *plcp);
-
-extern void brcms_c_rateset_filter(struct brcms_c_rateset *src,
-       struct brcms_c_rateset *dst, bool basic_only, u8 rates, uint xmask,
-       bool mcsallow);
-
-extern void
-brcms_c_rateset_default(struct brcms_c_rateset *rs_tgt,
-                       const struct brcms_c_rateset *rs_hw, uint phy_type,
-                       int bandtype, bool cck_only, uint rate_mask,
-                       bool mcsallow, u8 bw, u8 txstreams);
-
-extern s16 brcms_c_rate_legacy_phyctl(uint rate);
-
-extern void brcms_c_rateset_mcs_upd(struct brcms_c_rateset *rs, u8 txstreams);
-extern void brcms_c_rateset_mcs_clear(struct brcms_c_rateset *rateset);
-extern void brcms_c_rateset_mcs_build(struct brcms_c_rateset *rateset,
-                                     u8 txstreams);
-extern void brcms_c_rateset_bw_mcs_filter(struct brcms_c_rateset *rateset,
-                                         u8 bw);
+u32 brcms_c_compute_rspec(struct d11rxhdr *rxh, u8 *plcp);
+
+void brcms_c_rateset_filter(struct brcms_c_rateset *src,
+                           struct brcms_c_rateset *dst, bool basic_only,
+                           u8 rates, uint xmask, bool mcsallow);
+
+void brcms_c_rateset_default(struct brcms_c_rateset *rs_tgt,
+                            const struct brcms_c_rateset *rs_hw, uint phy_type,
+                            int bandtype, bool cck_only, uint rate_mask,
+                            bool mcsallow, u8 bw, u8 txstreams);
+
+s16 brcms_c_rate_legacy_phyctl(uint rate);
+
+void brcms_c_rateset_mcs_upd(struct brcms_c_rateset *rs, u8 txstreams);
+void brcms_c_rateset_mcs_clear(struct brcms_c_rateset *rateset);
+void brcms_c_rateset_mcs_build(struct brcms_c_rateset *rateset, u8 txstreams);
+void brcms_c_rateset_bw_mcs_filter(struct brcms_c_rateset *rateset, u8 bw);
 
 #endif                         /* _BRCM_RATE_H_ */
index 19f6580f69be27890eea13ddfe7c689cbe6f3089..ba9493009a3340db524bfef6dc5359c6a8be0ec4 100644 (file)
 
 #include "types.h"
 
-extern int brcms_c_stf_attach(struct brcms_c_info *wlc);
-extern void brcms_c_stf_detach(struct brcms_c_info *wlc);
+int brcms_c_stf_attach(struct brcms_c_info *wlc);
+void brcms_c_stf_detach(struct brcms_c_info *wlc);
 
-extern void brcms_c_tempsense_upd(struct brcms_c_info *wlc);
-extern void brcms_c_stf_ss_algo_channel_get(struct brcms_c_info *wlc,
-                                       u16 *ss_algo_channel,
-                                       u16 chanspec);
-extern int brcms_c_stf_ss_update(struct brcms_c_info *wlc,
-                            struct brcms_band *band);
-extern void brcms_c_stf_phy_txant_upd(struct brcms_c_info *wlc);
-extern int brcms_c_stf_txchain_set(struct brcms_c_info *wlc, s32 int_val,
-                              bool force);
-extern bool brcms_c_stf_stbc_rx_set(struct brcms_c_info *wlc, s32 int_val);
-extern void brcms_c_stf_phy_txant_upd(struct brcms_c_info *wlc);
-extern void brcms_c_stf_phy_chain_calc(struct brcms_c_info *wlc);
-extern u16 brcms_c_stf_phytxchain_sel(struct brcms_c_info *wlc,
-                                     u32 rspec);
-extern u16 brcms_c_stf_d11hdrs_phyctl_txant(struct brcms_c_info *wlc,
-                                       u32 rspec);
+void brcms_c_tempsense_upd(struct brcms_c_info *wlc);
+void brcms_c_stf_ss_algo_channel_get(struct brcms_c_info *wlc,
+                                    u16 *ss_algo_channel, u16 chanspec);
+int brcms_c_stf_ss_update(struct brcms_c_info *wlc, struct brcms_band *band);
+void brcms_c_stf_phy_txant_upd(struct brcms_c_info *wlc);
+int brcms_c_stf_txchain_set(struct brcms_c_info *wlc, s32 int_val, bool force);
+bool brcms_c_stf_stbc_rx_set(struct brcms_c_info *wlc, s32 int_val);
+void brcms_c_stf_phy_txant_upd(struct brcms_c_info *wlc);
+void brcms_c_stf_phy_chain_calc(struct brcms_c_info *wlc);
+u16 brcms_c_stf_phytxchain_sel(struct brcms_c_info *wlc, u32 rspec);
+u16 brcms_c_stf_d11hdrs_phyctl_txant(struct brcms_c_info *wlc, u32 rspec);
 
 #endif                         /* _BRCM_STF_H_ */
index 18750a814b4f219f918aaed081d8e99f01e7f6f4..c87dd89bcb78bddd2774bb7275741b1507aaf8d3 100644 (file)
@@ -43,16 +43,14 @@ struct brcms_ucode {
        u32 *bcm43xx_bomminor;
 };
 
-extern int
-brcms_ucode_data_init(struct brcms_info *wl, struct brcms_ucode *ucode);
+int brcms_ucode_data_init(struct brcms_info *wl, struct brcms_ucode *ucode);
 
-extern void brcms_ucode_data_free(struct brcms_ucode *ucode);
+void brcms_ucode_data_free(struct brcms_ucode *ucode);
 
-extern int brcms_ucode_init_buf(struct brcms_info *wl, void **pbuf,
-                               unsigned int idx);
-extern int brcms_ucode_init_uint(struct brcms_info *wl, size_t *n_bytes,
-                                unsigned int idx);
-extern void brcms_ucode_free_buf(void *);
-extern int  brcms_check_firmwares(struct brcms_info *wl);
+int brcms_ucode_init_buf(struct brcms_info *wl, void **pbuf, unsigned int idx);
+int brcms_ucode_init_uint(struct brcms_info *wl, size_t *n_bytes,
+                         unsigned int idx);
+void brcms_ucode_free_buf(void *);
+int  brcms_check_firmwares(struct brcms_info *wl);
 
 #endif /* _BRCM_UCODE_H_ */
index 92623f02b1c045460c28d860520000c127cfc611..8660a2cba09810428f127967c996a02cfbc174a1 100644 (file)
@@ -140,6 +140,6 @@ struct brcmu_d11inf {
        void (*decchspec)(struct brcmu_chan *ch);
 };
 
-extern void brcmu_d11_attach(struct brcmu_d11inf *d11inf);
+void brcmu_d11_attach(struct brcmu_d11inf *d11inf);
 
 #endif /* _BRCMU_CHANNELS_H_ */
index 898cacb8d01df299f1e4dce5ebc72fc008e5b4f2..8ba445b3fd72a92ca78cbd1042dcc38528df8efb 100644 (file)
@@ -114,31 +114,29 @@ static inline struct sk_buff *pktq_ppeek_tail(struct pktq *pq, int prec)
        return skb_peek_tail(&pq->q[prec].skblist);
 }
 
-extern struct sk_buff *brcmu_pktq_penq(struct pktq *pq, int prec,
-                                struct sk_buff *p);
-extern struct sk_buff *brcmu_pktq_penq_head(struct pktq *pq, int prec,
-                                     struct sk_buff *p);
-extern struct sk_buff *brcmu_pktq_pdeq(struct pktq *pq, int prec);
-extern struct sk_buff *brcmu_pktq_pdeq_tail(struct pktq *pq, int prec);
-extern struct sk_buff *brcmu_pktq_pdeq_match(struct pktq *pq, int prec,
-                                            bool (*match_fn)(struct sk_buff *p,
-                                                             void *arg),
-                                            void *arg);
+struct sk_buff *brcmu_pktq_penq(struct pktq *pq, int prec, struct sk_buff *p);
+struct sk_buff *brcmu_pktq_penq_head(struct pktq *pq, int prec,
+                                    struct sk_buff *p);
+struct sk_buff *brcmu_pktq_pdeq(struct pktq *pq, int prec);
+struct sk_buff *brcmu_pktq_pdeq_tail(struct pktq *pq, int prec);
+struct sk_buff *brcmu_pktq_pdeq_match(struct pktq *pq, int prec,
+                                     bool (*match_fn)(struct sk_buff *p,
+                                                      void *arg),
+                                     void *arg);
 
 /* packet primitives */
-extern struct sk_buff *brcmu_pkt_buf_get_skb(uint len);
-extern void brcmu_pkt_buf_free_skb(struct sk_buff *skb);
+struct sk_buff *brcmu_pkt_buf_get_skb(uint len);
+void brcmu_pkt_buf_free_skb(struct sk_buff *skb);
 
 /* Empty the queue at particular precedence level */
 /* callback function fn(pkt, arg) returns true if pkt belongs to if */
-extern void brcmu_pktq_pflush(struct pktq *pq, int prec,
-       bool dir, bool (*fn)(struct sk_buff *, void *), void *arg);
+void brcmu_pktq_pflush(struct pktq *pq, int prec, bool dir,
+                      bool (*fn)(struct sk_buff *, void *), void *arg);
 
 /* operations on a set of precedences in packet queue */
 
-extern int brcmu_pktq_mlen(struct pktq *pq, uint prec_bmp);
-extern struct sk_buff *brcmu_pktq_mdeq(struct pktq *pq, uint prec_bmp,
-       int *prec_out);
+int brcmu_pktq_mlen(struct pktq *pq, uint prec_bmp);
+struct sk_buff *brcmu_pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out);
 
 /* operations on packet queue as a whole */
 
@@ -167,11 +165,11 @@ static inline bool pktq_empty(struct pktq *pq)
        return pq->len == 0;
 }
 
-extern void brcmu_pktq_init(struct pktq *pq, int num_prec, int max_len);
+void brcmu_pktq_init(struct pktq *pq, int num_prec, int max_len);
 /* prec_out may be NULL if caller is not interested in return value */
-extern struct sk_buff *brcmu_pktq_peek_tail(struct pktq *pq, int *prec_out);
-extern void brcmu_pktq_flush(struct pktq *pq, bool dir,
-               bool (*fn)(struct sk_buff *, void *), void *arg);
+struct sk_buff *brcmu_pktq_peek_tail(struct pktq *pq, int *prec_out);
+void brcmu_pktq_flush(struct pktq *pq, bool dir,
+                     bool (*fn)(struct sk_buff *, void *), void *arg);
 
 /* externs */
 /* ip address */
@@ -204,13 +202,13 @@ static inline u16 brcmu_maskget16(u16 var, u16 mask, u8 shift)
 /* externs */
 /* format/print */
 #ifdef DEBUG
-extern void brcmu_prpkt(const char *msg, struct sk_buff *p0);
+void brcmu_prpkt(const char *msg, struct sk_buff *p0);
 #else
 #define brcmu_prpkt(a, b)
 #endif                         /* DEBUG */
 
 #ifdef DEBUG
-extern __printf(3, 4)
+__printf(3, 4)
 void brcmu_dbg_hex_dump(const void *data, size_t size, const char *fmt, ...);
 #else
 __printf(3, 4)
index f5e6b489ed3277e29cb6b2f4fba2f33bd2abf8fd..899cad34ccd3aa1649029d90294fe21c30608d14 100644 (file)
@@ -42,7 +42,6 @@ struct hwbus_priv {
        spinlock_t              lock; /* Serialize all bus operations */
        wait_queue_head_t       wq;
        int claimed;
-       int irq_disabled;
 };
 
 #define SDIO_TO_SPI_ADDR(addr) ((addr & 0x1f)>>2)
@@ -238,8 +237,6 @@ static irqreturn_t cw1200_spi_irq_handler(int irq, void *dev_id)
        struct hwbus_priv *self = dev_id;
 
        if (self->core) {
-               disable_irq_nosync(self->func->irq);
-               self->irq_disabled = 1;
                cw1200_irq_handler(self->core);
                return IRQ_HANDLED;
        } else {
@@ -253,9 +250,10 @@ static int cw1200_spi_irq_subscribe(struct hwbus_priv *self)
 
        pr_debug("SW IRQ subscribe\n");
 
-       ret = request_any_context_irq(self->func->irq, cw1200_spi_irq_handler,
-                                     IRQF_TRIGGER_HIGH,
-                                     "cw1200_wlan_irq", self);
+       ret = request_threaded_irq(self->func->irq, NULL,
+                                  cw1200_spi_irq_handler,
+                                  IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+                                  "cw1200_wlan_irq", self);
        if (WARN_ON(ret < 0))
                goto exit;
 
@@ -273,22 +271,13 @@ exit:
 
 static int cw1200_spi_irq_unsubscribe(struct hwbus_priv *self)
 {
+       int ret = 0;
+
        pr_debug("SW IRQ unsubscribe\n");
        disable_irq_wake(self->func->irq);
        free_irq(self->func->irq, self);
 
-       return 0;
-}
-
-static int cw1200_spi_irq_enable(struct hwbus_priv *self, int enable)
-{
-       /* Disables are handled by the interrupt handler */
-       if (enable && self->irq_disabled) {
-               enable_irq(self->func->irq);
-               self->irq_disabled = 0;
-       }
-
-       return 0;
+       return ret;
 }
 
 static int cw1200_spi_off(const struct cw1200_platform_data_spi *pdata)
@@ -368,7 +357,6 @@ static struct hwbus_ops cw1200_spi_hwbus_ops = {
        .unlock                 = cw1200_spi_unlock,
        .align_size             = cw1200_spi_align_size,
        .power_mgmt             = cw1200_spi_pm,
-       .irq_enable             = cw1200_spi_irq_enable,
 };
 
 /* Probe Function to be called by SPI stack when device is discovered */
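
The cw1200_spi change above trades the disable_irq_nosync()/irq_disabled bookkeeping for a threaded handler: with a NULL primary handler and IRQF_ONESHOT the core keeps the line masked until the thread function returns, so the handler may sleep on SPI I/O without any manual re-enable. A minimal sketch of that pattern, with hypothetical names for everything except the kernel APIs:

#include <linux/interrupt.h>

struct example_priv {
	int irq;	/* interrupt line, e.g. taken from the SPI function */
};

static irqreturn_t example_irq_thread(int irq, void *dev_id)
{
	struct example_priv *priv = dev_id;

	/* runs in process context; sleeping bus I/O is allowed here */
	(void)priv;
	return IRQ_HANDLED;
}

static int example_irq_subscribe(struct example_priv *priv)
{
	/*
	 * NULL primary handler: the core masks the line and wakes the thread.
	 * IRQF_ONESHOT is required in that case and keeps the line masked
	 * until example_irq_thread() returns.
	 */
	return request_threaded_irq(priv->irq, NULL, example_irq_thread,
				    IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
				    "example_wlan_irq", priv);
}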
index 0b2061bbc68bfa99ba9bce91dcdba18b99984879..acdff0f7f952e03fa25b51b47a9e83eda37d85c6 100644 (file)
@@ -485,7 +485,7 @@ int cw1200_load_firmware(struct cw1200_common *priv)
 
        /* Enable interrupt signalling */
        priv->hwbus_ops->lock(priv->hwbus_priv);
-       ret = __cw1200_irq_enable(priv, 2);
+       ret = __cw1200_irq_enable(priv, 1);
        priv->hwbus_ops->unlock(priv->hwbus_priv);
        if (ret < 0)
                goto unsubscribe;
index 51dfb3a90735521f18a1c572440e3ec8e486f5c8..8b2fc831c3de75f6f33799cc12c6174bad6d837b 100644 (file)
@@ -28,7 +28,6 @@ struct hwbus_ops {
        void (*unlock)(struct hwbus_priv *self);
        size_t (*align_size)(struct hwbus_priv *self, size_t size);
        int (*power_mgmt)(struct hwbus_priv *self, bool suspend);
-       int (*irq_enable)(struct hwbus_priv *self, int enable);
 };
 
 #endif /* CW1200_HWBUS_H */
index 41bd7615ccaa31f2ba8bf8c183a88db0a73bca60..ff230b7aeedd646e1591d01bdd1514afa86d7f0b 100644 (file)
@@ -273,21 +273,6 @@ int __cw1200_irq_enable(struct cw1200_common *priv, int enable)
        u16 val16;
        int ret;
 
-       /* We need to do this hack because the SPI layer can sleep on I/O
-          and the general path involves I/O to the device in interrupt
-          context.
-
-          However, the initial enable call needs to go to the hardware.
-
-          We don't worry about shutdown because we do a full reset which
-          clears the interrupt enabled bits.
-       */
-       if (priv->hwbus_ops->irq_enable) {
-               ret = priv->hwbus_ops->irq_enable(priv->hwbus_priv, enable);
-               if (ret || enable < 2)
-                       return ret;
-       }
-
        if (HIF_8601_SILICON == priv->hw_type) {
                ret = __cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, &val32);
                if (ret < 0) {
index 970a48baaf804a38ff1883702bbd9460d7d5b8c8..de7c4ffec3096b07ccaece961666b0bdde51ea27 100644 (file)
@@ -217,7 +217,7 @@ static void prism2_host_roaming(local_info_t *local)
                }
        }
 
-       memcpy(req.bssid, selected->bssid, 6);
+       memcpy(req.bssid, selected->bssid, ETH_ALEN);
        req.channel = selected->chid;
        spin_unlock_irqrestore(&local->lock, flags);
 
index 6b823a1ab7892fd9c81be07ce56bc45e75a6b8f1..8711a511fd5242217d40dee6ca1874c59fa07dfc 100644 (file)
@@ -2698,7 +2698,7 @@ static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr)
 /* data's copy of the eeprom data                                 */
 static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
 {
-       memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], 6);
+       memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], ETH_ALEN);
 }
 
 static void ipw_read_eeprom(struct ipw_priv *priv)
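
The two hunks above replace the magic length 6 with ETH_ALEN when copying MAC addresses. A small self-contained illustration, with a hypothetical structure name:

#include <linux/if_ether.h>	/* ETH_ALEN == 6 */
#include <linux/string.h>
#include <linux/types.h>

struct example_req {
	u8 bssid[ETH_ALEN];
};

static void example_copy_bssid(struct example_req *req, const u8 *bssid)
{
	/* same bytes as memcpy(..., 6), but the intent is explicit */
	memcpy(req->bssid, bssid, ETH_ALEN);
}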
index 6eede52ad8c0fbc0581a2b3a68d302cb589f4dde..5ce2f59d3378ed7ddcc4891769d72401eb66cc0d 100644 (file)
@@ -950,66 +950,55 @@ static inline int libipw_is_cck_rate(u8 rate)
 }
 
 /* libipw.c */
-extern void free_libipw(struct net_device *dev, int monitor);
-extern struct net_device *alloc_libipw(int sizeof_priv, int monitor);
-extern int libipw_change_mtu(struct net_device *dev, int new_mtu);
+void free_libipw(struct net_device *dev, int monitor);
+struct net_device *alloc_libipw(int sizeof_priv, int monitor);
+int libipw_change_mtu(struct net_device *dev, int new_mtu);
 
-extern void libipw_networks_age(struct libipw_device *ieee,
-                                  unsigned long age_secs);
+void libipw_networks_age(struct libipw_device *ieee, unsigned long age_secs);
 
-extern int libipw_set_encryption(struct libipw_device *ieee);
+int libipw_set_encryption(struct libipw_device *ieee);
 
 /* libipw_tx.c */
-extern netdev_tx_t libipw_xmit(struct sk_buff *skb,
-                              struct net_device *dev);
-extern void libipw_txb_free(struct libipw_txb *);
+netdev_tx_t libipw_xmit(struct sk_buff *skb, struct net_device *dev);
+void libipw_txb_free(struct libipw_txb *);
 
 /* libipw_rx.c */
-extern void libipw_rx_any(struct libipw_device *ieee,
-                    struct sk_buff *skb, struct libipw_rx_stats *stats);
-extern int libipw_rx(struct libipw_device *ieee, struct sk_buff *skb,
-                       struct libipw_rx_stats *rx_stats);
+void libipw_rx_any(struct libipw_device *ieee, struct sk_buff *skb,
+                  struct libipw_rx_stats *stats);
+int libipw_rx(struct libipw_device *ieee, struct sk_buff *skb,
+             struct libipw_rx_stats *rx_stats);
 /* make sure to set stats->len */
-extern void libipw_rx_mgt(struct libipw_device *ieee,
-                            struct libipw_hdr_4addr *header,
-                            struct libipw_rx_stats *stats);
-extern void libipw_network_reset(struct libipw_network *network);
+void libipw_rx_mgt(struct libipw_device *ieee, struct libipw_hdr_4addr *header,
+                  struct libipw_rx_stats *stats);
+void libipw_network_reset(struct libipw_network *network);
 
 /* libipw_geo.c */
-extern const struct libipw_geo *libipw_get_geo(struct libipw_device
-                                                    *ieee);
-extern void libipw_set_geo(struct libipw_device *ieee,
-                            const struct libipw_geo *geo);
-
-extern int libipw_is_valid_channel(struct libipw_device *ieee,
-                                     u8 channel);
-extern int libipw_channel_to_index(struct libipw_device *ieee,
-                                     u8 channel);
-extern u8 libipw_freq_to_channel(struct libipw_device *ieee, u32 freq);
-extern u8 libipw_get_channel_flags(struct libipw_device *ieee,
-                                     u8 channel);
-extern const struct libipw_channel *libipw_get_channel(struct
-                                                            libipw_device
-                                                            *ieee, u8 channel);
-extern u32 libipw_channel_to_freq(struct libipw_device * ieee,
-                                     u8 channel);
+const struct libipw_geo *libipw_get_geo(struct libipw_device *ieee);
+void libipw_set_geo(struct libipw_device *ieee, const struct libipw_geo *geo);
+
+int libipw_is_valid_channel(struct libipw_device *ieee, u8 channel);
+int libipw_channel_to_index(struct libipw_device *ieee, u8 channel);
+u8 libipw_freq_to_channel(struct libipw_device *ieee, u32 freq);
+u8 libipw_get_channel_flags(struct libipw_device *ieee, u8 channel);
+const struct libipw_channel *libipw_get_channel(struct libipw_device *ieee,
+                                               u8 channel);
+u32 libipw_channel_to_freq(struct libipw_device *ieee, u8 channel);
 
 /* libipw_wx.c */
-extern int libipw_wx_get_scan(struct libipw_device *ieee,
-                                struct iw_request_info *info,
-                                union iwreq_data *wrqu, char *key);
-extern int libipw_wx_set_encode(struct libipw_device *ieee,
-                                  struct iw_request_info *info,
-                                  union iwreq_data *wrqu, char *key);
-extern int libipw_wx_get_encode(struct libipw_device *ieee,
-                                  struct iw_request_info *info,
-                                  union iwreq_data *wrqu, char *key);
-extern int libipw_wx_set_encodeext(struct libipw_device *ieee,
-                                     struct iw_request_info *info,
-                                     union iwreq_data *wrqu, char *extra);
-extern int libipw_wx_get_encodeext(struct libipw_device *ieee,
-                                     struct iw_request_info *info,
-                                     union iwreq_data *wrqu, char *extra);
+int libipw_wx_get_scan(struct libipw_device *ieee, struct iw_request_info *info,
+                      union iwreq_data *wrqu, char *key);
+int libipw_wx_set_encode(struct libipw_device *ieee,
+                        struct iw_request_info *info, union iwreq_data *wrqu,
+                        char *key);
+int libipw_wx_get_encode(struct libipw_device *ieee,
+                        struct iw_request_info *info, union iwreq_data *wrqu,
+                        char *key);
+int libipw_wx_set_encodeext(struct libipw_device *ieee,
+                           struct iw_request_info *info,
+                           union iwreq_data *wrqu, char *extra);
+int libipw_wx_get_encodeext(struct libipw_device *ieee,
+                           struct iw_request_info *info,
+                           union iwreq_data *wrqu, char *extra);
 
 static inline void libipw_increment_scans(struct libipw_device *ieee)
 {
index 9a8703def0ba1a1825c264b5e5806003412c5e91..00030d43a1947380cf818bae7ff11a31b26d92a8 100644 (file)
@@ -189,15 +189,14 @@ struct il3945_ibss_seq {
  * for use by iwl-*.c
  *
  *****************************************************************************/
-extern int il3945_calc_db_from_ratio(int sig_ratio);
-extern void il3945_rx_replenish(void *data);
-extern void il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq);
-extern unsigned int il3945_fill_beacon_frame(struct il_priv *il,
-                                            struct ieee80211_hdr *hdr,
-                                            int left);
-extern int il3945_dump_nic_event_log(struct il_priv *il, bool full_log,
-                                    char **buf, bool display);
-extern void il3945_dump_nic_error_log(struct il_priv *il);
+int il3945_calc_db_from_ratio(int sig_ratio);
+void il3945_rx_replenish(void *data);
+void il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq);
+unsigned int il3945_fill_beacon_frame(struct il_priv *il,
+                                     struct ieee80211_hdr *hdr, int left);
+int il3945_dump_nic_event_log(struct il_priv *il, bool full_log, char **buf,
+                             bool display);
+void il3945_dump_nic_error_log(struct il_priv *il);
 
 /******************************************************************************
  *
@@ -215,39 +214,36 @@ extern void il3945_dump_nic_error_log(struct il_priv *il);
  * il3945_mac_     <-- mac80211 callback
  *
  ****************************************************************************/
-extern void il3945_hw_handler_setup(struct il_priv *il);
-extern void il3945_hw_setup_deferred_work(struct il_priv *il);
-extern void il3945_hw_cancel_deferred_work(struct il_priv *il);
-extern int il3945_hw_rxq_stop(struct il_priv *il);
-extern int il3945_hw_set_hw_params(struct il_priv *il);
-extern int il3945_hw_nic_init(struct il_priv *il);
-extern int il3945_hw_nic_stop_master(struct il_priv *il);
-extern void il3945_hw_txq_ctx_free(struct il_priv *il);
-extern void il3945_hw_txq_ctx_stop(struct il_priv *il);
-extern int il3945_hw_nic_reset(struct il_priv *il);
-extern int il3945_hw_txq_attach_buf_to_tfd(struct il_priv *il,
-                                          struct il_tx_queue *txq,
-                                          dma_addr_t addr, u16 len, u8 reset,
-                                          u8 pad);
-extern void il3945_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq);
-extern int il3945_hw_get_temperature(struct il_priv *il);
-extern int il3945_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq);
-extern unsigned int il3945_hw_get_beacon_cmd(struct il_priv *il,
-                                            struct il3945_frame *frame,
-                                            u8 rate);
+void il3945_hw_handler_setup(struct il_priv *il);
+void il3945_hw_setup_deferred_work(struct il_priv *il);
+void il3945_hw_cancel_deferred_work(struct il_priv *il);
+int il3945_hw_rxq_stop(struct il_priv *il);
+int il3945_hw_set_hw_params(struct il_priv *il);
+int il3945_hw_nic_init(struct il_priv *il);
+int il3945_hw_nic_stop_master(struct il_priv *il);
+void il3945_hw_txq_ctx_free(struct il_priv *il);
+void il3945_hw_txq_ctx_stop(struct il_priv *il);
+int il3945_hw_nic_reset(struct il_priv *il);
+int il3945_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
+                                   dma_addr_t addr, u16 len, u8 reset, u8 pad);
+void il3945_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq);
+int il3945_hw_get_temperature(struct il_priv *il);
+int il3945_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq);
+unsigned int il3945_hw_get_beacon_cmd(struct il_priv *il,
+                                     struct il3945_frame *frame, u8 rate);
 void il3945_hw_build_tx_cmd_rate(struct il_priv *il, struct il_device_cmd *cmd,
                                 struct ieee80211_tx_info *info,
                                 struct ieee80211_hdr *hdr, int sta_id);
-extern int il3945_hw_reg_send_txpower(struct il_priv *il);
-extern int il3945_hw_reg_set_txpower(struct il_priv *il, s8 power);
-extern void il3945_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb);
+int il3945_hw_reg_send_txpower(struct il_priv *il);
+int il3945_hw_reg_set_txpower(struct il_priv *il, s8 power);
+void il3945_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb);
 void il3945_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb);
-extern void il3945_disable_events(struct il_priv *il);
-extern int il4965_get_temperature(const struct il_priv *il);
-extern void il3945_post_associate(struct il_priv *il);
-extern void il3945_config_ap(struct il_priv *il);
+void il3945_disable_events(struct il_priv *il);
+int il4965_get_temperature(const struct il_priv *il);
+void il3945_post_associate(struct il_priv *il);
+void il3945_config_ap(struct il_priv *il);
 
-extern int il3945_commit_rxon(struct il_priv *il);
+int il3945_commit_rxon(struct il_priv *il);
 
 /**
  * il3945_hw_find_station - Find station id for a given BSSID
@@ -257,14 +253,14 @@ extern int il3945_commit_rxon(struct il_priv *il);
  * not yet been merged into a single common layer for managing the
  * station tables.
  */
-extern u8 il3945_hw_find_station(struct il_priv *il, const u8 * bssid);
+u8 il3945_hw_find_station(struct il_priv *il, const u8 *bssid);
 
-extern __le32 il3945_get_antenna_flags(const struct il_priv *il);
-extern int il3945_init_hw_rate_table(struct il_priv *il);
-extern void il3945_reg_txpower_periodic(struct il_priv *il);
-extern int il3945_txpower_set_from_eeprom(struct il_priv *il);
+__le32 il3945_get_antenna_flags(const struct il_priv *il);
+int il3945_init_hw_rate_table(struct il_priv *il);
+void il3945_reg_txpower_periodic(struct il_priv *il);
+int il3945_txpower_set_from_eeprom(struct il_priv *il);
 
-extern int il3945_rs_next_rate(struct il_priv *il, int rate);
+int il3945_rs_next_rate(struct il_priv *il, int rate);
 
 /* scanning */
 int il3945_request_scan(struct il_priv *il, struct ieee80211_vif *vif);
index 1b15b0b2292b4fe06c3fa1efcc95279c9901370e..337dfcf3bbde7c58fe4681268f63b9d29ea8f5fa 100644 (file)
@@ -272,7 +272,7 @@ il4965_hw_valid_rtc_data_addr(u32 addr)
        ((t) < IL_TX_POWER_TEMPERATURE_MIN || \
         (t) > IL_TX_POWER_TEMPERATURE_MAX)
 
-extern void il4965_temperature_calib(struct il_priv *il);
+void il4965_temperature_calib(struct il_priv *il);
 /********************* END TEMPERATURE ***************************************/
 
 /********************* START TXPOWER *****************************************/
index 83f8ed8a5528cbd209c97560e9ab9c59d2294a90..ad123d66ab6c5c13e2bd120df7b3c2b412d1400a 100644 (file)
@@ -858,9 +858,9 @@ struct il_hw_params {
  * il4965_mac_     <-- mac80211 callback
  *
  ****************************************************************************/
-extern void il4965_update_chain_flags(struct il_priv *il);
+void il4965_update_chain_flags(struct il_priv *il);
 extern const u8 il_bcast_addr[ETH_ALEN];
-extern int il_queue_space(const struct il_queue *q);
+int il_queue_space(const struct il_queue *q);
 static inline int
 il_queue_used(const struct il_queue *q, int i)
 {
@@ -1727,7 +1727,7 @@ int il_alloc_txq_mem(struct il_priv *il);
 void il_free_txq_mem(struct il_priv *il);
 
 #ifdef CONFIG_IWLEGACY_DEBUGFS
-extern void il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len);
+void il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len);
 #else
 static inline void
 il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len)
@@ -1760,12 +1760,12 @@ void il_chswitch_done(struct il_priv *il, bool is_success);
 /*****************************************************
 * TX
 ******************************************************/
-extern void il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq);
-extern int il_tx_queue_init(struct il_priv *il, u32 txq_id);
-extern void il_tx_queue_reset(struct il_priv *il, u32 txq_id);
-extern void il_tx_queue_unmap(struct il_priv *il, int txq_id);
-extern void il_tx_queue_free(struct il_priv *il, int txq_id);
-extern void il_setup_watchdog(struct il_priv *il);
+void il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq);
+int il_tx_queue_init(struct il_priv *il, u32 txq_id);
+void il_tx_queue_reset(struct il_priv *il, u32 txq_id);
+void il_tx_queue_unmap(struct il_priv *il, int txq_id);
+void il_tx_queue_free(struct il_priv *il, int txq_id);
+void il_setup_watchdog(struct il_priv *il);
 /*****************************************************
  * TX power
  ****************************************************/
@@ -1931,10 +1931,10 @@ il_is_ready_rf(struct il_priv *il)
        return il_is_ready(il);
 }
 
-extern void il_send_bt_config(struct il_priv *il);
-extern int il_send_stats_request(struct il_priv *il, u8 flags, bool clear);
-extern void il_apm_stop(struct il_priv *il);
-extern void _il_apm_stop(struct il_priv *il);
+void il_send_bt_config(struct il_priv *il);
+int il_send_stats_request(struct il_priv *il, u8 flags, bool clear);
+void il_apm_stop(struct il_priv *il);
+void _il_apm_stop(struct il_priv *il);
 
 int il_apm_init(struct il_priv *il);
 
@@ -1968,15 +1968,15 @@ void il_tx_cmd_protection(struct il_priv *il, struct ieee80211_tx_info *info,
 
 irqreturn_t il_isr(int irq, void *data);
 
-extern void il_set_bit(struct il_priv *p, u32 r, u32 m);
-extern void il_clear_bit(struct il_priv *p, u32 r, u32 m);
-extern bool _il_grab_nic_access(struct il_priv *il);
-extern int _il_poll_bit(struct il_priv *il, u32 addr, u32 bits, u32 mask, int timeout);
-extern int il_poll_bit(struct il_priv *il, u32 addr, u32 mask, int timeout);
-extern u32 il_rd_prph(struct il_priv *il, u32 reg);
-extern void il_wr_prph(struct il_priv *il, u32 addr, u32 val);
-extern u32 il_read_targ_mem(struct il_priv *il, u32 addr);
-extern void il_write_targ_mem(struct il_priv *il, u32 addr, u32 val);
+void il_set_bit(struct il_priv *p, u32 r, u32 m);
+void il_clear_bit(struct il_priv *p, u32 r, u32 m);
+bool _il_grab_nic_access(struct il_priv *il);
+int _il_poll_bit(struct il_priv *il, u32 addr, u32 bits, u32 mask, int timeout);
+int il_poll_bit(struct il_priv *il, u32 addr, u32 mask, int timeout);
+u32 il_rd_prph(struct il_priv *il, u32 reg);
+void il_wr_prph(struct il_priv *il, u32 addr, u32 val);
+u32 il_read_targ_mem(struct il_priv *il, u32 addr);
+void il_write_targ_mem(struct il_priv *il, u32 addr, u32 val);
 
 static inline void
 _il_write8(struct il_priv *il, u32 ofs, u8 val)
@@ -2868,13 +2868,13 @@ il4965_first_antenna(u8 mask)
  * The specific throughput table used is based on the type of network
  * the associated with, including A, B, G, and G w/ TGG protection
  */
-extern void il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id);
+void il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id);
 
 /* Initialize station's rate scaling information after adding station */
-extern void il4965_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta,
-                               u8 sta_id);
-extern void il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta,
-                               u8 sta_id);
+void il4965_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta,
+                        u8 sta_id);
+void il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta,
+                        u8 sta_id);
 
 /**
  * il_rate_control_register - Register the rate control algorithm callbacks
@@ -2886,8 +2886,8 @@ extern void il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta,
  * ieee80211_register_hw
  *
  */
-extern int il4965_rate_control_register(void);
-extern int il3945_rate_control_register(void);
+int il4965_rate_control_register(void);
+int il3945_rate_control_register(void);
 
 /**
  * il_rate_control_unregister - Unregister the rate control callbacks
@@ -2895,11 +2895,11 @@ extern int il3945_rate_control_register(void);
  * This should be called after calling ieee80211_unregister_hw, but before
  * the driver is unloaded.
  */
-extern void il4965_rate_control_unregister(void);
-extern void il3945_rate_control_unregister(void);
+void il4965_rate_control_unregister(void);
+void il3945_rate_control_unregister(void);
 
-extern int il_power_update_mode(struct il_priv *il, bool force);
-extern void il_power_initialize(struct il_priv *il);
+int il_power_update_mode(struct il_priv *il, bool force);
+void il_power_initialize(struct il_priv *il);
 
 extern u32 il_debug_level;
 
index f2a86ffc3b4cf09440874c88b6f0cc3d804ec3a6..23d5f0275ce98e1cde6c05e7b9a1654c870774ea 100644 (file)
@@ -397,7 +397,7 @@ static inline __le32 iwl_hw_set_rate_n_flags(u8 rate, u32 flags)
        return cpu_to_le32(flags|(u32)rate);
 }
 
-extern int iwl_alive_start(struct iwl_priv *priv);
+int iwl_alive_start(struct iwl_priv *priv);
 
 #ifdef CONFIG_IWLWIFI_DEBUG
 void iwl_print_rx_config_cmd(struct iwl_priv *priv,
index a79fdd137f956ce5cb4de2ef7e1c0f239e76895a..7434d9edf3b773566530b79eb542a8431bbfb24c 100644 (file)
@@ -270,7 +270,7 @@ struct iwl_sensitivity_ranges {
  * iwlXXXX_     <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
  *
  ****************************************************************************/
-extern void iwl_update_chain_flags(struct iwl_priv *priv);
+void iwl_update_chain_flags(struct iwl_priv *priv);
 extern const u8 iwl_bcast_addr[ETH_ALEN];
 
 #define IWL_OPERATION_MODE_AUTO     0
index 5d83cab22d625084389b8880b46a4abd21fb9f44..26fc550cd68c58ae24342232bda51be2dfd690a5 100644 (file)
@@ -407,8 +407,8 @@ static inline u8 first_antenna(u8 mask)
 
 
 /* Initialize station's rate scaling information after adding station */
-extern void iwl_rs_rate_init(struct iwl_priv *priv,
-                            struct ieee80211_sta *sta, u8 sta_id);
+void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta,
+                     u8 sta_id);
 
 /**
  * iwl_rate_control_register - Register the rate control algorithm callbacks
@@ -420,7 +420,7 @@ extern void iwl_rs_rate_init(struct iwl_priv *priv,
  * ieee80211_register_hw
  *
  */
-extern int iwlagn_rate_control_register(void);
+int iwlagn_rate_control_register(void);
 
 /**
  * iwl_rate_control_unregister - Unregister the rate control callbacks
@@ -428,6 +428,6 @@ extern int iwlagn_rate_control_register(void);
  * This should be called after calling ieee80211_unregister_hw, but before
  * the driver is unloaded.
  */
-extern void iwlagn_rate_control_unregister(void);
+void iwlagn_rate_control_unregister(void);
 
 #endif /* __iwl_agn__rs__ */
index 335cf16829023e6702ef1d5921c19cccb3196c84..465d40ee176f0187cd8914cd7d4e26c86524cb3b 100644 (file)
@@ -314,9 +314,8 @@ static inline u8 num_of_ant(u8 mask)
 }
 
 /* Initialize station's rate scaling information after adding station */
-extern void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm,
-                                struct ieee80211_sta *sta,
-                                enum ieee80211_band band);
+void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+                         enum ieee80211_band band);
 
 /**
  * iwl_rate_control_register - Register the rate control algorithm callbacks
@@ -328,7 +327,7 @@ extern void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm,
  * ieee80211_register_hw
  *
  */
-extern int iwl_mvm_rate_control_register(void);
+int iwl_mvm_rate_control_register(void);
 
 /**
  * iwl_rate_control_unregister - Unregister the rate control callbacks
@@ -336,7 +335,7 @@ extern int iwl_mvm_rate_control_register(void);
  * This should be called after calling ieee80211_unregister_hw, but before
  * the driver is unloaded.
  */
-extern void iwl_mvm_rate_control_unregister(void);
+void iwl_mvm_rate_control_unregister(void);
 
 struct iwl_mvm_sta;
 
index 21c688264708316b34c51d196aa36fff5f5aaf01..1214c587fd08587f263b2c979eab9bc902b53ae1 100644 (file)
@@ -150,7 +150,7 @@ mwifiex_11n_form_amsdu_txpd(struct mwifiex_private *priv,
  */
 int
 mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
-                         struct mwifiex_ra_list_tbl *pra_list, int headroom,
+                         struct mwifiex_ra_list_tbl *pra_list,
                          int ptrindex, unsigned long ra_list_flags)
                          __releases(&priv->wmm.ra_list_spinlock)
 {
@@ -160,6 +160,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
        int pad = 0, ret;
        struct mwifiex_tx_param tx_param;
        struct txpd *ptx_pd = NULL;
+       int headroom = adapter->iface_type == MWIFIEX_USB ? 0 : INTF_HEADER_LEN;
 
        skb_src = skb_peek(&pra_list->skb_head);
        if (!skb_src) {
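
For context on the hunk above: the aggregation routine now derives the interface headroom itself instead of receiving it as a parameter, zero for USB and INTF_HEADER_LEN for everything else. A user-space sketch of that selection; the enum values and the 4-byte header length are assumptions for illustration, not values taken from the driver:

#include <stdio.h>

#define INTF_HEADER_LEN 4       /* assumed bus-interface header size */

enum iface_type { IFACE_USB, IFACE_SDIO, IFACE_PCIE };

/* Mirrors the selection added inside mwifiex_11n_aggregate_pkt(). */
static int intf_headroom(enum iface_type type)
{
        return type == IFACE_USB ? 0 : INTF_HEADER_LEN;
}

int main(void)
{
        printf("usb headroom:  %d\n", intf_headroom(IFACE_USB));
        printf("sdio headroom: %d\n", intf_headroom(IFACE_SDIO));
        return 0;
}
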
index 900e1c62a0cceb4499457be3b76be7e9589be415..892098d6a69687dd2d8c1fc61612a6fb9999d754 100644 (file)
@@ -26,7 +26,7 @@
 int mwifiex_11n_deaggregate_pkt(struct mwifiex_private *priv,
                                struct sk_buff *skb);
 int mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
-                             struct mwifiex_ra_list_tbl *ptr, int headroom,
+                             struct mwifiex_ra_list_tbl *ptr,
                              int ptr_index, unsigned long flags)
                              __releases(&priv->wmm.ra_list_spinlock);
 
index 2d761477d15e5761ed968200484a6fd55d066f39..a6c46f3b6e3a0d622b796f094b5b75016428438e 100644 (file)
@@ -1155,7 +1155,7 @@ int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv,
        uint32_t conditions = le32_to_cpu(phs_cfg->params.hs_config.conditions);
 
        if (phs_cfg->action == cpu_to_le16(HS_ACTIVATE) &&
-           adapter->iface_type == MWIFIEX_SDIO) {
+           adapter->iface_type != MWIFIEX_USB) {
                mwifiex_hs_activated_event(priv, true);
                return 0;
        } else {
@@ -1167,8 +1167,7 @@ int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv,
        }
        if (conditions != HS_CFG_CANCEL) {
                adapter->is_hs_configured = true;
-               if (adapter->iface_type == MWIFIEX_USB ||
-                   adapter->iface_type == MWIFIEX_PCIE)
+               if (adapter->iface_type == MWIFIEX_USB)
                        mwifiex_hs_activated_event(priv, true);
        } else {
                adapter->is_hs_configured = false;
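
The net effect of this hunk is that PCIe is now handled like SDIO for host-sleep activation: any non-USB interface reports the activated event directly from the HS_ACTIVATE response, while USB keeps reporting it from the configuration path. A compact sketch of the new decision, with placeholder names:

#include <stdbool.h>
#include <stdio.h>

enum iface_type { IFACE_USB, IFACE_SDIO, IFACE_PCIE };

/* Event raised straight from the HS_ACTIVATE response? */
static bool hs_event_on_activate(enum iface_type type)
{
        return type != IFACE_USB;       /* was: type == IFACE_SDIO */
}

/* Event raised later, from the host-sleep configure path? */
static bool hs_event_on_configure(enum iface_type type)
{
        return type == IFACE_USB;       /* was: USB or PCIE */
}

int main(void)
{
        printf("pcie: on_activate=%d on_configure=%d\n",
               hs_event_on_activate(IFACE_PCIE),
               hs_event_on_configure(IFACE_PCIE));
        return 0;
}
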
index 2472d4b7f00e997bc9e6b21586edcf42fe692498..1c70b8d092270ba3a456664aaf6ec3e9da4b3b59 100644 (file)
@@ -447,9 +447,6 @@ static int mwifiex_usb_suspend(struct usb_interface *intf, pm_message_t message)
         */
        adapter->is_suspended = true;
 
-       for (i = 0; i < adapter->priv_num; i++)
-               netif_carrier_off(adapter->priv[i]->netdev);
-
        if (atomic_read(&card->rx_cmd_urb_pending) && card->rx_cmd.urb)
                usb_kill_urb(card->rx_cmd.urb);
 
@@ -509,10 +506,6 @@ static int mwifiex_usb_resume(struct usb_interface *intf)
                                                  MWIFIEX_RX_CMD_BUF_SIZE);
        }
 
-       for (i = 0; i < adapter->priv_num; i++)
-               if (adapter->priv[i]->media_connected)
-                       netif_carrier_on(adapter->priv[i]->netdev);
-
        /* Disable Host Sleep */
        if (adapter->hs_activated)
                mwifiex_cancel_hs(mwifiex_get_priv(adapter,
index 2e8f9cdea54d719cf0f25b4b29d816eb0709ff7a..95fa3599b4070b02aecc8d4d46073137b05c5b98 100644 (file)
@@ -1239,8 +1239,7 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
                if (enable_tx_amsdu && mwifiex_is_amsdu_allowed(priv, tid) &&
                    mwifiex_is_11n_aggragation_possible(priv, ptr,
                                                        adapter->tx_buf_size))
-                       mwifiex_11n_aggregate_pkt(priv, ptr, INTF_HEADER_LEN,
-                                                 ptr_index, flags);
+                       mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index, flags);
                        /* ra_list_spinlock has been freed in
                           mwifiex_11n_aggregate_pkt() */
                else
index 644d6e0c51ccf235e5b457da85f57c64029e5c2d..0f129d498fb1057d898db2c6e92868d8ff1f3baf 100644 (file)
@@ -83,11 +83,10 @@ mwifiex_wmm_is_ra_list_empty(struct list_head *ra_list_hhead)
 }
 
 void mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
-                                       struct sk_buff *skb);
+                                struct sk_buff *skb);
 void mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra);
 void mwifiex_rotate_priolists(struct mwifiex_private *priv,
-                             struct mwifiex_ra_list_tbl *ra,
-                             int tid);
+                             struct mwifiex_ra_list_tbl *ra, int tid);
 
 int mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter);
 void mwifiex_wmm_process_tx(struct mwifiex_adapter *adapter);
@@ -95,21 +94,18 @@ int mwifiex_is_ralist_valid(struct mwifiex_private *priv,
                            struct mwifiex_ra_list_tbl *ra_list, int tid);
 
 u8 mwifiex_wmm_compute_drv_pkt_delay(struct mwifiex_private *priv,
-                                            const struct sk_buff *skb);
+                                    const struct sk_buff *skb);
 void mwifiex_wmm_init(struct mwifiex_adapter *adapter);
 
-extern u32 mwifiex_wmm_process_association_req(struct mwifiex_private *priv,
-                                                u8 **assoc_buf,
-                                                struct ieee_types_wmm_parameter
-                                                *wmmie,
-                                                struct ieee80211_ht_cap
-                                                *htcap);
+u32 mwifiex_wmm_process_association_req(struct mwifiex_private *priv,
+                                       u8 **assoc_buf,
+                                       struct ieee_types_wmm_parameter *wmmie,
+                                       struct ieee80211_ht_cap *htcap);
 
 void mwifiex_wmm_setup_queue_priorities(struct mwifiex_private *priv,
-                                       struct ieee_types_wmm_parameter
-                                       *wmm_ie);
+                                       struct ieee_types_wmm_parameter *wmm_ie);
 void mwifiex_wmm_setup_ac_downgrade(struct mwifiex_private *priv);
-extern int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
-                                     const struct host_cmd_ds_command *resp);
+int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
+                              const struct host_cmd_ds_command *resp);
 
 #endif /* !_MWIFIEX_WMM_H_ */
index 3bb936b9558c7f96dedc215189f72b49a313b8d7..eebd2be21ee9067e7d90f45f32b227bb07601998 100644 (file)
@@ -182,23 +182,20 @@ extern int orinoco_debug;
 /* Exported prototypes                                              */
 /********************************************************************/
 
-extern struct orinoco_private *alloc_orinocodev(
-       int sizeof_card, struct device *device,
-       int (*hard_reset)(struct orinoco_private *),
-       int (*stop_fw)(struct orinoco_private *, int));
-extern void free_orinocodev(struct orinoco_private *priv);
-extern int orinoco_init(struct orinoco_private *priv);
-extern int orinoco_if_add(struct orinoco_private *priv,
-                         unsigned long base_addr,
-                         unsigned int irq,
-                         const struct net_device_ops *ops);
-extern void orinoco_if_del(struct orinoco_private *priv);
-extern int orinoco_up(struct orinoco_private *priv);
-extern void orinoco_down(struct orinoco_private *priv);
-extern irqreturn_t orinoco_interrupt(int irq, void *dev_id);
-
-extern void __orinoco_ev_info(struct net_device *dev, struct hermes *hw);
-extern void __orinoco_ev_rx(struct net_device *dev, struct hermes *hw);
+struct orinoco_private *alloc_orinocodev(int sizeof_card, struct device *device,
+                                        int (*hard_reset)(struct orinoco_private *),
+                                        int (*stop_fw)(struct orinoco_private *, int));
+void free_orinocodev(struct orinoco_private *priv);
+int orinoco_init(struct orinoco_private *priv);
+int orinoco_if_add(struct orinoco_private *priv, unsigned long base_addr,
+                  unsigned int irq, const struct net_device_ops *ops);
+void orinoco_if_del(struct orinoco_private *priv);
+int orinoco_up(struct orinoco_private *priv);
+void orinoco_down(struct orinoco_private *priv);
+irqreturn_t orinoco_interrupt(int irq, void *dev_id);
+
+void __orinoco_ev_info(struct net_device *dev, struct hermes *hw);
+void __orinoco_ev_rx(struct net_device *dev, struct hermes *hw);
 
 int orinoco_process_xmit_skb(struct sk_buff *skb,
                             struct net_device *dev,
index b9deef66cf4b8179893b918fbd0fb3365480a686..e328d3058c419a0c7c4c367fd7e62ada31ddded4 100644 (file)
@@ -83,6 +83,7 @@ static struct usb_device_id p54u_table[] = {
        {USB_DEVICE(0x06a9, 0x000e)},   /* Westell 802.11g USB (A90-211WG-01) */
        {USB_DEVICE(0x06b9, 0x0121)},   /* Thomson SpeedTouch 121g */
        {USB_DEVICE(0x0707, 0xee13)},   /* SMC 2862W-G version 2 */
+       {USB_DEVICE(0x07aa, 0x0020)},   /* Corega WLUSB2GTST USB */
        {USB_DEVICE(0x0803, 0x4310)},   /* Zoom 4410a */
        {USB_DEVICE(0x083a, 0x4521)},   /* Siemens Gigaset USB Adapter 54 version 2 */
        {USB_DEVICE(0x083a, 0x4531)},   /* T-Com Sinus 154 data II */
@@ -979,6 +980,7 @@ static int p54u_load_firmware(struct ieee80211_hw *dev,
        if (err) {
                dev_err(&priv->udev->dev, "(p54usb) cannot load firmware %s "
                                          "(%d)!\n", p54u_fwlist[i].fw, err);
+               usb_put_dev(udev);
        }
 
        return err;
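
The one functional change in this hunk is the usb_put_dev() in the firmware-load error path; it presumably balances a reference taken with usb_get_dev() earlier in the same load sequence, so a failed firmware request no longer leaks a device reference. A generic user-space sketch of that get/put pairing (not the driver's actual code):

#include <stdio.h>

struct dev { int refcnt; };

static void dev_get(struct dev *d) { d->refcnt++; }
static void dev_put(struct dev *d) { d->refcnt--; }

/* Every early exit after dev_get() must be matched by a dev_put(). */
static int load_firmware(struct dev *d, int fw_available)
{
        dev_get(d);
        if (!fw_available) {
                dev_put(d);     /* the fix: drop the reference on failure */
                return -1;
        }
        /* ...submit the firmware; the success path puts it on completion... */
        return 0;
}

int main(void)
{
        struct dev d = { .refcnt = 0 };

        load_firmware(&d, 0);
        printf("refcnt after failed load: %d\n", d.refcnt);    /* 0: no leak */
        return 0;
}
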
index 1c22b81e6ef35e30f86afc994445ff66ef4f7c36..8863a6cb2388d952926cf8fe209a8b67fa004ddb 100644 (file)
@@ -183,7 +183,7 @@ prism54_update_stats(struct work_struct *work)
        data = r.ptr;
 
        /* copy this MAC to the bss */
-       memcpy(bss.address, data, 6);
+       memcpy(bss.address, data, ETH_ALEN);
        kfree(data);
 
        /* now ask for the corresponding bss */
@@ -531,7 +531,7 @@ prism54_set_wap(struct net_device *ndev, struct iw_request_info *info,
                return -EINVAL;
 
        /* prepare the structure for the set object */
-       memcpy(&bssid[0], awrq->sa_data, 6);
+       memcpy(&bssid[0], awrq->sa_data, ETH_ALEN);
 
        /* set the bssid -- does this make sense when in AP mode? */
        rvalue = mgt_set_request(priv, DOT11_OID_BSSID, 0, &bssid);
@@ -550,7 +550,7 @@ prism54_get_wap(struct net_device *ndev, struct iw_request_info *info,
        int rvalue;
 
        rvalue = mgt_get_request(priv, DOT11_OID_BSSID, 0, NULL, &r);
-       memcpy(awrq->sa_data, r.ptr, 6);
+       memcpy(awrq->sa_data, r.ptr, ETH_ALEN);
        awrq->sa_family = ARPHRD_ETHER;
        kfree(r.ptr);
 
@@ -582,7 +582,7 @@ prism54_translate_bss(struct net_device *ndev, struct iw_request_info *info,
        size_t wpa_ie_len;
 
        /* The first entry must be the MAC address */
-       memcpy(iwe.u.ap_addr.sa_data, bss->address, 6);
+       memcpy(iwe.u.ap_addr.sa_data, bss->address, ETH_ALEN);
        iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
        iwe.cmd = SIOCGIWAP;
        current_ev = iwe_stream_add_event(info, current_ev, end_buf,
@@ -2489,7 +2489,7 @@ prism54_set_mac_address(struct net_device *ndev, void *addr)
                              &((struct sockaddr *) addr)->sa_data);
        if (!ret)
                memcpy(priv->ndev->dev_addr,
-                      &((struct sockaddr *) addr)->sa_data, 6);
+                      &((struct sockaddr *) addr)->sa_data, ETH_ALEN);
 
        return ret;
 }
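
These prism54 hunks replace the bare constant 6 with ETH_ALEN, which <linux/if_ether.h> defines as 6, so behaviour is unchanged and only the intent becomes explicit. A trivial stand-alone example:

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6      /* octets in one Ethernet (MAC) address */

int main(void)
{
        const unsigned char addr[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
        unsigned char bssid[ETH_ALEN];

        memcpy(bssid, addr, ETH_ALEN);  /* reads as "copy a MAC", not "copy 6 bytes" */
        printf("%02x:...:%02x\n", bssid[0], bssid[ETH_ALEN - 1]);
        return 0;
}
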
index 5970ff6f40cc315dd253054c84f031cceda01170..41a16d30c79c5be46f89229d3185a033d8131f0f 100644 (file)
@@ -837,7 +837,7 @@ islpci_setup(struct pci_dev *pdev)
        /* ndev->set_multicast_list = &islpci_set_multicast_list; */
        ndev->addr_len = ETH_ALEN;
        /* Get a non-zero dummy MAC address for nameif. Jean II */
-       memcpy(ndev->dev_addr, dummy_mac, 6);
+       memcpy(ndev->dev_addr, dummy_mac, ETH_ALEN);
 
        ndev->watchdog_timeo = ISLPCI_TX_TIMEOUT;
 
index a01606b36e03f836a5cc57ab292dd47f2ba2352f..056af38e72e399d88528343e978fec15d512f180 100644 (file)
@@ -682,7 +682,7 @@ mgt_update_addr(islpci_private *priv)
                                     isl_oid[GEN_OID_MACADDRESS].size, &res);
 
        if ((ret == 0) && res && (res->header->operation != PIMFOR_OP_ERROR))
-               memcpy(priv->ndev->dev_addr, res->data, 6);
+               memcpy(priv->ndev->dev_addr, res->data, ETH_ALEN);
        else
                ret = -EIO;
        if (res)
index 35e00086a520bf06393237b3aebf0aa3bbb9e617..0105e6c1901ed96fc6be6cfd4900b2285ed0835a 100644 (file)
 #define        CAM_CONFIG_USEDK                                1
 #define        CAM_CONFIG_NO_USEDK                             0
 
-extern void rtl_cam_reset_all_entry(struct ieee80211_hw *hw);
-extern u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
-                       u32 ul_key_id, u32 ul_entry_idx, u32 ul_enc_alg,
-                       u32 ul_default_key, u8 *key_content);
+void rtl_cam_reset_all_entry(struct ieee80211_hw *hw);
+u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
+                        u32 ul_key_id, u32 ul_entry_idx, u32 ul_enc_alg,
+                        u32 ul_default_key, u8 *key_content);
 int rtl_cam_delete_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
-                       u32 ul_key_id);
+                            u32 ul_key_id);
 void rtl_cam_mark_invalid(struct ieee80211_hw *hw, u8 uc_index);
 void rtl_cam_empty_entry(struct ieee80211_hw *hw, u8 uc_index);
 void rtl_cam_reset_sec_info(struct ieee80211_hw *hw);
index 733b7ce7f0e2a981f442cd9cdadd8f40a6426558..210ce7cd94d8d14201a68ce285e9c880993d30db 100644 (file)
@@ -115,7 +115,7 @@ static void rtl_op_stop(struct ieee80211_hw *hw)
        mutex_lock(&rtlpriv->locks.conf_mutex);
 
        mac->link_state = MAC80211_NOLINK;
-       memset(mac->bssid, 0, 6);
+       memset(mac->bssid, 0, ETH_ALEN);
        mac->vendor = PEER_UNKNOWN;
 
        /*reset sec info */
@@ -280,7 +280,7 @@ static void rtl_op_remove_interface(struct ieee80211_hw *hw,
        mac->p2p = 0;
        mac->vif = NULL;
        mac->link_state = MAC80211_NOLINK;
-       memset(mac->bssid, 0, 6);
+       memset(mac->bssid, 0, ETH_ALEN);
        mac->vendor = PEER_UNKNOWN;
        mac->opmode = NL80211_IFTYPE_UNSPECIFIED;
        rtlpriv->cfg->ops->set_network_type(hw, mac->opmode);
@@ -721,7 +721,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
                        mac->link_state = MAC80211_LINKED;
                        mac->cnt_after_linked = 0;
                        mac->assoc_id = bss_conf->aid;
-                       memcpy(mac->bssid, bss_conf->bssid, 6);
+                       memcpy(mac->bssid, bss_conf->bssid, ETH_ALEN);
 
                        if (rtlpriv->cfg->ops->linked_set_reg)
                                rtlpriv->cfg->ops->linked_set_reg(hw);
@@ -750,7 +750,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
                        if (ppsc->p2p_ps_info.p2p_ps_mode > P2P_PS_NONE)
                                rtl_p2p_ps_cmd(hw, P2P_PS_DISABLE);
                        mac->link_state = MAC80211_NOLINK;
-                       memset(mac->bssid, 0, 6);
+                       memset(mac->bssid, 0, ETH_ALEN);
                        mac->vendor = PEER_UNKNOWN;
 
                        if (rtlpriv->dm.supp_phymode_switch) {
@@ -826,7 +826,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
                         bss_conf->bssid);
 
                mac->vendor = PEER_UNKNOWN;
-               memcpy(mac->bssid, bss_conf->bssid, 6);
+               memcpy(mac->bssid, bss_conf->bssid, ETH_ALEN);
                rtlpriv->cfg->ops->set_network_type(hw, vif->type);
 
                rcu_read_lock();
index 395a326acfb44a0874b331b64442d23b5e120d1a..1663b3afd41e90e67512823f0c5a0f7ee02ef80b 100644 (file)
@@ -104,20 +104,19 @@ struct efuse_priv {
        u8 tx_power_g[14];
 };
 
-extern void read_efuse_byte(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf);
-extern void efuse_initialize(struct ieee80211_hw *hw);
-extern u8 efuse_read_1byte(struct ieee80211_hw *hw, u16 address);
-extern void efuse_write_1byte(struct ieee80211_hw *hw, u16 address, u8 value);
-extern void read_efuse(struct ieee80211_hw *hw, u16 _offset,
-                      u16 _size_byte, u8 *pbuf);
-extern void efuse_shadow_read(struct ieee80211_hw *hw, u8 type,
-                             u16 offset, u32 *value);
-extern void efuse_shadow_write(struct ieee80211_hw *hw, u8 type,
-                              u16 offset, u32 value);
-extern bool efuse_shadow_update(struct ieee80211_hw *hw);
-extern bool efuse_shadow_update_chk(struct ieee80211_hw *hw);
-extern void rtl_efuse_shadow_map_update(struct ieee80211_hw *hw);
-extern void efuse_force_write_vendor_Id(struct ieee80211_hw *hw);
-extern void efuse_re_pg_section(struct ieee80211_hw *hw, u8 section_idx);
+void read_efuse_byte(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf);
+void efuse_initialize(struct ieee80211_hw *hw);
+u8 efuse_read_1byte(struct ieee80211_hw *hw, u16 address);
+void efuse_write_1byte(struct ieee80211_hw *hw, u16 address, u8 value);
+void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf);
+void efuse_shadow_read(struct ieee80211_hw *hw, u8 type, u16 offset,
+                      u32 *value);
+void efuse_shadow_write(struct ieee80211_hw *hw, u8 type, u16 offset,
+                       u32 value);
+bool efuse_shadow_update(struct ieee80211_hw *hw);
+bool efuse_shadow_update_chk(struct ieee80211_hw *hw);
+void rtl_efuse_shadow_map_update(struct ieee80211_hw *hw);
+void efuse_force_write_vendor_Id(struct ieee80211_hw *hw);
+void efuse_re_pg_section(struct ieee80211_hw *hw, u8 section_idx);
 
 #endif
index f1acd6d27e444ca4fac956d14190de873ef15d06..71ddf4f3f6ccc36bf95d65c265299d8867bc08d8 100644 (file)
@@ -200,37 +200,32 @@ enum _ANT_DIV_TYPE {
        CGCS_RX_SW_ANTDIV               = 0x05,
 };
 
-extern u32 rtl88e_phy_query_bb_reg(struct ieee80211_hw *hw,
-                                  u32 regaddr, u32 bitmask);
-extern void rtl88e_phy_set_bb_reg(struct ieee80211_hw *hw,
-                                 u32 regaddr, u32 bitmask, u32 data);
-extern u32 rtl88e_phy_query_rf_reg(struct ieee80211_hw *hw,
-                                  enum radio_path rfpath, u32 regaddr,
-                                  u32 bitmask);
-extern void rtl88e_phy_set_rf_reg(struct ieee80211_hw *hw,
-                                 enum radio_path rfpath, u32 regaddr,
-                                 u32 bitmask, u32 data);
-extern bool rtl88e_phy_mac_config(struct ieee80211_hw *hw);
-extern bool rtl88e_phy_bb_config(struct ieee80211_hw *hw);
-extern bool rtl88e_phy_rf_config(struct ieee80211_hw *hw);
-extern void rtl88e_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
-extern void rtl88e_phy_get_txpower_level(struct ieee80211_hw *hw,
-                                        long *powerlevel);
-extern void rtl88e_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
-extern void rtl88e_phy_scan_operation_backup(struct ieee80211_hw *hw,
-                                            u8 operation);
-extern void rtl88e_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
-extern void rtl88e_phy_set_bw_mode(struct ieee80211_hw *hw,
-                                  enum nl80211_channel_type ch_type);
-extern void rtl88e_phy_sw_chnl_callback(struct ieee80211_hw *hw);
-extern u8 rtl88e_phy_sw_chnl(struct ieee80211_hw *hw);
-extern void rtl88e_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery);
+u32 rtl88e_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask);
+void rtl88e_phy_set_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask,
+                          u32 data);
+u32 rtl88e_phy_query_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
+                           u32 regaddr, u32 bitmask);
+void rtl88e_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
+                          u32 regaddr, u32 bitmask, u32 data);
+bool rtl88e_phy_mac_config(struct ieee80211_hw *hw);
+bool rtl88e_phy_bb_config(struct ieee80211_hw *hw);
+bool rtl88e_phy_rf_config(struct ieee80211_hw *hw);
+void rtl88e_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
+void rtl88e_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel);
+void rtl88e_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
+void rtl88e_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation);
+void rtl88e_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
+void rtl88e_phy_set_bw_mode(struct ieee80211_hw *hw,
+                           enum nl80211_channel_type ch_type);
+void rtl88e_phy_sw_chnl_callback(struct ieee80211_hw *hw);
+u8 rtl88e_phy_sw_chnl(struct ieee80211_hw *hw);
+void rtl88e_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery);
 void rtl88e_phy_lc_calibrate(struct ieee80211_hw *hw);
 void rtl88e_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain);
 bool rtl88e_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
                                          enum radio_path rfpath);
 bool rtl88e_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
-extern bool rtl88e_phy_set_rf_power_state(struct ieee80211_hw *hw,
-                                         enum rf_pwrstate rfpwr_state);
+bool rtl88e_phy_set_rf_power_state(struct ieee80211_hw *hw,
+                                  enum rf_pwrstate rfpwr_state);
 
 #endif
index d5e3b704f9304a596bd943cfcbebd00db6559776..f8973e58c173b80a4ccba0c2306b95fae1aa568e 100644 (file)
@@ -188,65 +188,55 @@ struct tx_power_struct {
 };
 
 bool rtl92c_phy_bb_config(struct ieee80211_hw *hw);
-u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw,
-                                  u32 regaddr, u32 bitmask);
-void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw,
-                                 u32 regaddr, u32 bitmask, u32 data);
-u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw,
-                                  enum radio_path rfpath, u32 regaddr,
-                                  u32 bitmask);
-extern void rtl92ce_phy_set_rf_reg(struct ieee80211_hw *hw,
-                                  enum radio_path rfpath, u32 regaddr,
-                                  u32 bitmask, u32 data);
+u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask);
+void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask,
+                          u32 data);
+u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
+                           u32 regaddr, u32 bitmask);
+void rtl92ce_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
+                           u32 regaddr, u32 bitmask, u32 data);
 bool rtl92c_phy_mac_config(struct ieee80211_hw *hw);
 bool rtl92ce_phy_bb_config(struct ieee80211_hw *hw);
 bool rtl92c_phy_rf_config(struct ieee80211_hw *hw);
 bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw,
-                                                enum radio_path rfpath);
+                                         enum radio_path rfpath);
 void rtl92c_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
-void rtl92c_phy_get_txpower_level(struct ieee80211_hw *hw,
-                                        long *powerlevel);
+void rtl92c_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel);
 void rtl92c_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
-bool rtl92c_phy_update_txpower_dbm(struct ieee80211_hw *hw,
-                                         long power_indbm);
-void rtl92c_phy_scan_operation_backup(struct ieee80211_hw *hw,
-                                            u8 operation);
+bool rtl92c_phy_update_txpower_dbm(struct ieee80211_hw *hw, long power_indbm);
+void rtl92c_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation);
 void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw,
-                                  enum nl80211_channel_type ch_type);
+                           enum nl80211_channel_type ch_type);
 void rtl92c_phy_sw_chnl_callback(struct ieee80211_hw *hw);
 u8 rtl92c_phy_sw_chnl(struct ieee80211_hw *hw);
 void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery);
-void rtl92c_phy_set_beacon_hw_reg(struct ieee80211_hw *hw,
-                                        u16 beaconinterval);
+void rtl92c_phy_set_beacon_hw_reg(struct ieee80211_hw *hw, u16 beaconinterval);
 void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, char delta);
 void rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw);
 void _rtl92ce_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t);
 void rtl92c_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain);
 bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
                                          enum radio_path rfpath);
-bool rtl8192_phy_check_is_legal_rfpath(struct ieee80211_hw *hw,
-                                             u32 rfpath);
+bool rtl8192_phy_check_is_legal_rfpath(struct ieee80211_hw *hw, u32 rfpath);
 bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
 bool rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
-                                         enum rf_pwrstate rfpwr_state);
+                                   enum rf_pwrstate rfpwr_state);
 void rtl92ce_phy_set_rf_on(struct ieee80211_hw *hw);
 bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
 void rtl92c_phy_set_io(struct ieee80211_hw *hw);
 void rtl92c_bb_block_on(struct ieee80211_hw *hw);
-u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw,
-                                     enum radio_path rfpath, u32 offset);
+u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw, enum radio_path rfpath,
+                              u32 offset);
 u32 _rtl92c_phy_fw_rf_serial_read(struct ieee80211_hw *hw,
-                                        enum radio_path rfpath, u32 offset);
+                                 enum radio_path rfpath, u32 offset);
 u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask);
 void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw,
-                                       enum radio_path rfpath, u32 offset,
-                                       u32 data);
+                                enum radio_path rfpath, u32 offset, u32 data);
 void _rtl92c_phy_fw_rf_serial_write(struct ieee80211_hw *hw,
-                                          enum radio_path rfpath, u32 offset,
-                                          u32 data);
+                                   enum radio_path rfpath, u32 offset,
+                                   u32 data);
 void _rtl92c_store_pwrIndex_diffrate_offset(struct ieee80211_hw *hw,
-                                                  u32 regaddr, u32 bitmask,
-                                                  u32 data);
+                                           u32 regaddr, u32 bitmask, u32 data);
 bool _rtl92ce_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
 void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw);
 bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw);
index 6c8d56efceae2fd8e76e8e128fea6e720241cf12..d8fe68b389d213e17b133b7044fec647b315f2b8 100644 (file)
 #define RF6052_MAX_REG                 0x3F
 #define RF6052_MAX_PATH                        2
 
-extern void rtl92ce_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
-                                            u8 bandwidth);
-extern void rtl92ce_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
-                                              u8 *ppowerlevel);
-extern void rtl92ce_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
-                                               u8 *ppowerlevel, u8 channel);
-extern bool rtl92ce_phy_rf6052_config(struct ieee80211_hw *hw);
+void rtl92ce_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth);
+void rtl92ce_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+                                       u8 *ppowerlevel);
+void rtl92ce_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+                                        u8 *ppowerlevel, u8 channel);
+bool rtl92ce_phy_rf6052_config(struct ieee80211_hw *hw);
 #endif
index 090fd33a158db1635c14d3ea323a5363f7c82169..11b439d6b67167c11e1529919bc5f07dfe96e060 100644 (file)
 #define RF6052_MAX_REG                 0x3F
 #define RF6052_MAX_PATH                        2
 
-extern void rtl92cu_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
-                                           u8 bandwidth);
-extern void rtl92c_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
-                                             u8 *ppowerlevel);
-extern void rtl92c_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
-                                              u8 *ppowerlevel, u8 channel);
+void rtl92cu_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth);
+void rtl92c_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+                                      u8 *ppowerlevel);
+void rtl92c_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+                                       u8 *ppowerlevel, u8 channel);
 bool rtl92cu_phy_rf6052_config(struct ieee80211_hw *hw);
 bool rtl92cu_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
-                                         enum radio_path rfpath);
+                                          enum radio_path rfpath);
 void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
                                        u8 *ppowerlevel);
 void rtl92cu_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
index 7c9f7a2f1e427d2bac52cfacdea38c949f390a2e..1bc7b1a96d4aebe5577067460e56fa698405b3a8 100644 (file)
@@ -55,10 +55,9 @@ void rtl92de_set_key(struct ieee80211_hw *hw, u32 key_index,
                     u8 *p_macaddr, bool is_group, u8 enc_algo,
                     bool is_wepkey, bool clear_all);
 
-extern void rtl92de_write_dword_dbi(struct ieee80211_hw *hw, u16 offset,
-                                   u32 value, u8 direct);
-extern u32 rtl92de_read_dword_dbi(struct ieee80211_hw *hw, u16 offset,
-                                 u8 direct);
+void rtl92de_write_dword_dbi(struct ieee80211_hw *hw, u16 offset, u32 value,
+                            u8 direct);
+u32 rtl92de_read_dword_dbi(struct ieee80211_hw *hw, u16 offset, u8 direct);
 void rtl92de_suspend(struct ieee80211_hw *hw);
 void rtl92de_resume(struct ieee80211_hw *hw);
 void rtl92d_linked_set_reg(struct ieee80211_hw *hw);
index f074952bf25c53c3fcd6dbf6ae22f0a1f8de9316..0f993f451cdb5ea91eb238a68e2b86e459053b80 100644 (file)
@@ -127,34 +127,30 @@ static inline void rtl92d_release_cckandrw_pagea_ctl(struct ieee80211_hw *hw,
                        *flag);
 }
 
-extern u32 rtl92d_phy_query_bb_reg(struct ieee80211_hw *hw,
-                                  u32 regaddr, u32 bitmask);
-extern void rtl92d_phy_set_bb_reg(struct ieee80211_hw *hw,
-                                 u32 regaddr, u32 bitmask, u32 data);
-extern u32 rtl92d_phy_query_rf_reg(struct ieee80211_hw *hw,
-                                  enum radio_path rfpath, u32 regaddr,
-                                  u32 bitmask);
-extern void rtl92d_phy_set_rf_reg(struct ieee80211_hw *hw,
-                                 enum radio_path rfpath, u32 regaddr,
-                                 u32 bitmask, u32 data);
-extern bool rtl92d_phy_mac_config(struct ieee80211_hw *hw);
-extern bool rtl92d_phy_bb_config(struct ieee80211_hw *hw);
-extern bool rtl92d_phy_rf_config(struct ieee80211_hw *hw);
-extern bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw,
-                                                enum radio_path rfpath);
-extern void rtl92d_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
-extern void rtl92d_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
-extern void rtl92d_phy_scan_operation_backup(struct ieee80211_hw *hw,
-                                            u8 operation);
-extern void rtl92d_phy_set_bw_mode(struct ieee80211_hw *hw,
-                                  enum nl80211_channel_type ch_type);
-extern u8 rtl92d_phy_sw_chnl(struct ieee80211_hw *hw);
+u32 rtl92d_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask);
+void rtl92d_phy_set_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask,
+                          u32 data);
+u32 rtl92d_phy_query_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
+                           u32 regaddr, u32 bitmask);
+void rtl92d_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
+                          u32 regaddr, u32 bitmask, u32 data);
+bool rtl92d_phy_mac_config(struct ieee80211_hw *hw);
+bool rtl92d_phy_bb_config(struct ieee80211_hw *hw);
+bool rtl92d_phy_rf_config(struct ieee80211_hw *hw);
+bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw,
+                                         enum radio_path rfpath);
+void rtl92d_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
+void rtl92d_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
+void rtl92d_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation);
+void rtl92d_phy_set_bw_mode(struct ieee80211_hw *hw,
+                           enum nl80211_channel_type ch_type);
+u8 rtl92d_phy_sw_chnl(struct ieee80211_hw *hw);
 bool rtl92d_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
                                          enum rf_content content,
                                          enum radio_path rfpath);
 bool rtl92d_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
-extern bool rtl92d_phy_set_rf_power_state(struct ieee80211_hw *hw,
-                                         enum rf_pwrstate rfpwr_state);
+bool rtl92d_phy_set_rf_power_state(struct ieee80211_hw *hw,
+                                  enum rf_pwrstate rfpwr_state);
 
 void rtl92d_phy_config_macphymode(struct ieee80211_hw *hw);
 void rtl92d_phy_config_macphymode_info(struct ieee80211_hw *hw);
index 0fe1a48593e8b90dd797335efed9fd9e38598ef6..7303d12c266fbe2ed44d55aa7e46f689e1acc2c2 100644 (file)
 #ifndef __RTL92D_RF_H__
 #define __RTL92D_RF_H__
 
-extern void rtl92d_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
-                                           u8 bandwidth);
-extern void rtl92d_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
-                                             u8 *ppowerlevel);
-extern void rtl92d_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
-                                              u8 *ppowerlevel, u8 channel);
-extern bool rtl92d_phy_rf6052_config(struct ieee80211_hw *hw);
-extern bool rtl92d_phy_enable_anotherphy(struct ieee80211_hw *hw, bool bmac0);
-extern void rtl92d_phy_powerdown_anotherphy(struct ieee80211_hw *hw,
-                                           bool bmac0);
+void rtl92d_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth);
+void rtl92d_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+                                      u8 *ppowerlevel);
+void rtl92d_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+                                       u8 *ppowerlevel, u8 channel);
+bool rtl92d_phy_rf6052_config(struct ieee80211_hw *hw);
+bool rtl92d_phy_enable_anotherphy(struct ieee80211_hw *hw, bool bmac0);
+void rtl92d_phy_powerdown_anotherphy(struct ieee80211_hw *hw, bool bmac0);
 
 #endif
index e7a59eba351adf85cd951f28377517b8347a6807..bbb950dac5ba1bdd4739810a7aaeafb6e3232613 100644 (file)
@@ -183,42 +183,39 @@ struct tx_power_struct {
        u32 mcs_original_offset[4][16];
 };
 
-extern u32 rtl8723ae_phy_query_bb_reg(struct ieee80211_hw *hw,
-                                     u32 regaddr, u32 bitmask);
-extern void rtl8723ae_phy_set_bb_reg(struct ieee80211_hw *hw,
-                                    u32 regaddr, u32 bitmask, u32 data);
-extern u32 rtl8723ae_phy_query_rf_reg(struct ieee80211_hw *hw,
-                                     enum radio_path rfpath, u32 regaddr,
-                                     u32 bitmask);
-extern void rtl8723ae_phy_set_rf_reg(struct ieee80211_hw *hw,
-                                    enum radio_path rfpath, u32 regaddr,
-                                    u32 bitmask, u32 data);
-extern bool rtl8723ae_phy_mac_config(struct ieee80211_hw *hw);
-extern bool rtl8723ae_phy_bb_config(struct ieee80211_hw *hw);
-extern bool rtl8723ae_phy_rf_config(struct ieee80211_hw *hw);
-extern bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw,
-                                                enum radio_path rfpath);
-extern void rtl8723ae_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
-extern void rtl8723ae_phy_get_txpower_level(struct ieee80211_hw *hw,
-                                           long *powerlevel);
-extern void rtl8723ae_phy_set_txpower_level(struct ieee80211_hw *hw,
-                                           u8 channel);
-extern bool rtl8723ae_phy_update_txpower_dbm(struct ieee80211_hw *hw,
-                                            long power_indbm);
-extern void rtl8723ae_phy_scan_operation_backup(struct ieee80211_hw *hw,
-                                               u8 operation);
-extern void rtl8723ae_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
-extern void rtl8723ae_phy_set_bw_mode(struct ieee80211_hw *hw,
-                                     enum nl80211_channel_type ch_type);
-extern void rtl8723ae_phy_sw_chnl_callback(struct ieee80211_hw *hw);
-extern u8 rtl8723ae_phy_sw_chnl(struct ieee80211_hw *hw);
-extern void rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery);
+u32 rtl8723ae_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr,
+                              u32 bitmask);
+void rtl8723ae_phy_set_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask,
+                             u32 data);
+u32 rtl8723ae_phy_query_rf_reg(struct ieee80211_hw *hw,
+                              enum radio_path rfpath, u32 regaddr,
+                              u32 bitmask);
+void rtl8723ae_phy_set_rf_reg(struct ieee80211_hw *hw,
+                             enum radio_path rfpath, u32 regaddr, u32 bitmask,
+                             u32 data);
+bool rtl8723ae_phy_mac_config(struct ieee80211_hw *hw);
+bool rtl8723ae_phy_bb_config(struct ieee80211_hw *hw);
+bool rtl8723ae_phy_rf_config(struct ieee80211_hw *hw);
+bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw,
+                                         enum radio_path rfpath);
+void rtl8723ae_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
+void rtl8723ae_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel);
+void rtl8723ae_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
+bool rtl8723ae_phy_update_txpower_dbm(struct ieee80211_hw *hw,
+                                     long power_indbm);
+void rtl8723ae_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation);
+void rtl8723ae_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
+void rtl8723ae_phy_set_bw_mode(struct ieee80211_hw *hw,
+                              enum nl80211_channel_type ch_type);
+void rtl8723ae_phy_sw_chnl_callback(struct ieee80211_hw *hw);
+u8 rtl8723ae_phy_sw_chnl(struct ieee80211_hw *hw);
+void rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery);
 void rtl8723ae_phy_lc_calibrate(struct ieee80211_hw *hw);
 void rtl8723ae_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain);
 bool rtl8723ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
                                             enum radio_path rfpath);
 bool rtl8723ae_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
-extern bool rtl8723ae_phy_set_rf_power_state(struct ieee80211_hw *hw,
-                                            enum rf_pwrstate rfpwr_state);
+bool rtl8723ae_phy_set_rf_power_state(struct ieee80211_hw *hw,
+                                     enum rf_pwrstate rfpwr_state);
 
 #endif
index d0f9dd79abea6f9f40ed12a1c8cc3eeea337bd68..57f1933ee663e48b2433ef887a0b361a107e9770 100644 (file)
 
 #define RF6052_MAX_TX_PWR              0x3F
 
-extern void rtl8723ae_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
-                                           u8 bandwidth);
-extern void rtl8723ae_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
-                                             u8 *ppowerlevel);
-extern void rtl8723ae_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
-                                              u8 *ppowerlevel, u8 channel);
-extern bool rtl8723ae_phy_rf6052_config(struct ieee80211_hw *hw);
+void rtl8723ae_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth);
+void rtl8723ae_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+                                         u8 *ppowerlevel);
+void rtl8723ae_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+                                          u8 *ppowerlevel, u8 channel);
+bool rtl8723ae_phy_rf6052_config(struct ieee80211_hw *hw);
 
 #endif
index cc03e7c87cbe739c9d762a6462b1b0b6f21e1795..703258742d28eefd339bde254d30661c4175b1a9 100644 (file)
@@ -2057,7 +2057,7 @@ struct rtl_priv {
           that it points to the data allocated
           beyond  this structure like:
           rtl_pci_priv or rtl_usb_priv */
-       u8 priv[0];
+       u8 priv[0] __aligned(sizeof(void *));
 };
 
 #define rtl_priv(hw)           (((struct rtl_priv *)(hw)->priv))
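
The single change here forces the zero-length priv[] tail onto a pointer-size boundary, so the bus-private structure placed there (rtl_pci_priv or rtl_usb_priv, which typically start with pointers) cannot end up misaligned. A user-space sketch of the idea, using hypothetical structures:

#include <stdio.h>
#include <stddef.h>

struct bus_priv { void *dev; int irq; };        /* hypothetical private data */

struct outer_plain {
        char flags[5];
        unsigned char priv[0];                  /* may land at any offset (here: 5) */
};

struct outer_aligned {
        char flags[5];
        unsigned char priv[0] __attribute__((aligned(sizeof(void *))));
};

int main(void)
{
        printf("plain priv offset:   %zu\n", offsetof(struct outer_plain, priv));
        printf("aligned priv offset: %zu\n", offsetof(struct outer_aligned, priv));
        /* Only the aligned variant is always safe to use as a struct bus_priv *. */
        return 0;
}
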
index f3e591c611ded744ec0b6ea2cefe9b7dd3d568c7..d0b0feb035fb82da90ae8f380e314f423cac94aa 100644 (file)
 #include <asm/xen/hypercall.h>
 #include <asm/xen/page.h>
 
+/* SKB control block overlay is used to store useful information when
+ * doing guest RX.
+ */
+struct skb_cb_overlay {
+       int meta_slots_used;
+       int peek_slots_count;
+};
+
 /* Provide an option to disable split event channels at load time as
  * event channels are limited resource. Split event channels are
  * enabled by default.
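
The control-block overlay moves to the top of the file because the slot-counting path below now also stores peek_slots_count in it. The pattern relies on skb->cb being a fixed 48-byte scratch area that the current owner of the skb may use freely; the only requirement is that the overlay fits. A minimal user-space sketch of the idiom (sizes and field names as in the hunk, the rest is illustrative):

#include <assert.h>
#include <stdio.h>

#define SKB_CB_SIZE 48                  /* size of sk_buff::cb in mainline */

struct fake_skb { char cb[SKB_CB_SIZE]; };

struct skb_cb_overlay {                 /* same fields as the driver's overlay */
        int meta_slots_used;
        int peek_slots_count;
};

int main(void)
{
        struct fake_skb skb;
        struct skb_cb_overlay *sco = (struct skb_cb_overlay *)skb.cb;

        /* The overlay must never outgrow the control block. */
        assert(sizeof(*sco) <= sizeof(skb.cb));

        sco->peek_slots_count = 3;      /* producer: ring slots this skb needs */
        printf("slots needed: %d\n", sco->peek_slots_count);
        return 0;
}
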
@@ -212,49 +220,6 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
        return false;
 }
 
-struct xenvif_count_slot_state {
-       unsigned long copy_off;
-       bool head;
-};
-
-unsigned int xenvif_count_frag_slots(struct xenvif *vif,
-                                    unsigned long offset, unsigned long size,
-                                    struct xenvif_count_slot_state *state)
-{
-       unsigned count = 0;
-
-       offset &= ~PAGE_MASK;
-
-       while (size > 0) {
-               unsigned long bytes;
-
-               bytes = PAGE_SIZE - offset;
-
-               if (bytes > size)
-                       bytes = size;
-
-               if (start_new_rx_buffer(state->copy_off, bytes, state->head)) {
-                       count++;
-                       state->copy_off = 0;
-               }
-
-               if (state->copy_off + bytes > MAX_BUFFER_OFFSET)
-                       bytes = MAX_BUFFER_OFFSET - state->copy_off;
-
-               state->copy_off += bytes;
-
-               offset += bytes;
-               size -= bytes;
-
-               if (offset == PAGE_SIZE)
-                       offset = 0;
-
-               state->head = false;
-       }
-
-       return count;
-}
-
 /*
  * Figure out how many ring slots we're going to need to send @skb to
  * the guest. This function is essentially a dry run of
@@ -262,40 +227,53 @@ unsigned int xenvif_count_frag_slots(struct xenvif *vif,
  */
 unsigned int xenvif_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
 {
-       struct xenvif_count_slot_state state;
        unsigned int count;
-       unsigned char *data;
-       unsigned i;
+       int i, copy_off;
+       struct skb_cb_overlay *sco;
 
-       state.head = true;
-       state.copy_off = 0;
+       count = DIV_ROUND_UP(skb_headlen(skb), PAGE_SIZE);
 
-       /* Slot for the first (partial) page of data. */
-       count = 1;
+       copy_off = skb_headlen(skb) % PAGE_SIZE;
 
-       /* Need a slot for the GSO prefix for GSO extra data? */
        if (skb_shinfo(skb)->gso_size)
                count++;
 
-       data = skb->data;
-       while (data < skb_tail_pointer(skb)) {
-               unsigned long offset = offset_in_page(data);
-               unsigned long size = PAGE_SIZE - offset;
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+               unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
+               unsigned long offset = skb_shinfo(skb)->frags[i].page_offset;
+               unsigned long bytes;
 
-               if (data + size > skb_tail_pointer(skb))
-                       size = skb_tail_pointer(skb) - data;
+               offset &= ~PAGE_MASK;
 
-               count += xenvif_count_frag_slots(vif, offset, size, &state);
+               while (size > 0) {
+                       BUG_ON(offset >= PAGE_SIZE);
+                       BUG_ON(copy_off > MAX_BUFFER_OFFSET);
 
-               data += size;
-       }
+                       bytes = PAGE_SIZE - offset;
 
-       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-               unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
-               unsigned long offset = skb_shinfo(skb)->frags[i].page_offset;
+                       if (bytes > size)
+                               bytes = size;
+
+                       if (start_new_rx_buffer(copy_off, bytes, 0)) {
+                               count++;
+                               copy_off = 0;
+                       }
 
-               count += xenvif_count_frag_slots(vif, offset, size, &state);
+                       if (copy_off + bytes > MAX_BUFFER_OFFSET)
+                               bytes = MAX_BUFFER_OFFSET - copy_off;
+
+                       copy_off += bytes;
+
+                       offset += bytes;
+                       size -= bytes;
+
+                       if (offset == PAGE_SIZE)
+                               offset = 0;
+               }
        }
+
+       sco = (struct skb_cb_overlay *)skb->cb;
+       sco->peek_slots_count = count;
        return count;
 }
 
@@ -327,14 +305,11 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif,
        return meta;
 }
 
-/*
- * Set up the grant operations for this fragment. If it's a flipping
- * interface, we also set up the unmap request from here.
- */
+/* Set up the grant operations for this fragment. */
 static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
                                 struct netrx_pending_operations *npo,
                                 struct page *page, unsigned long size,
-                                unsigned long offset, int *head)
+                                unsigned long offset, int head, int *first)
 {
        struct gnttab_copy *copy_gop;
        struct xenvif_rx_meta *meta;
@@ -358,12 +333,12 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
                if (bytes > size)
                        bytes = size;
 
-               if (start_new_rx_buffer(npo->copy_off, bytes, *head)) {
+               if (start_new_rx_buffer(npo->copy_off, bytes, head)) {
                        /*
                         * Netfront requires there to be some data in the head
                         * buffer.
                         */
-                       BUG_ON(*head);
+                       BUG_ON(*first);
 
                        meta = get_next_rx_buffer(vif, npo);
                }
@@ -397,10 +372,10 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
                }
 
                /* Leave a gap for the GSO descriptor. */
-               if (*head && skb_shinfo(skb)->gso_size && !vif->gso_prefix)
+               if (*first && skb_shinfo(skb)->gso_size && !vif->gso_prefix)
                        vif->rx.req_cons++;
 
-               *head = 0; /* There must be something in this buffer now. */
+               *first = 0; /* There must be something in this buffer now. */
 
        }
 }
@@ -426,7 +401,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
        struct xen_netif_rx_request *req;
        struct xenvif_rx_meta *meta;
        unsigned char *data;
-       int head = 1;
+       int first = 1;
        int old_meta_prod;
 
        old_meta_prod = npo->meta_prod;
@@ -462,7 +437,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
                        len = skb_tail_pointer(skb) - data;
 
                xenvif_gop_frag_copy(vif, skb, npo,
-                                    virt_to_page(data), len, offset, &head);
+                                    virt_to_page(data), len, offset, 1, &first);
                data += len;
        }
 
@@ -471,7 +446,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
                                     skb_frag_page(&skb_shinfo(skb)->frags[i]),
                                     skb_frag_size(&skb_shinfo(skb)->frags[i]),
                                     skb_shinfo(skb)->frags[i].page_offset,
-                                    &head);
+                                    0, &first);
        }
 
        return npo->meta_prod - old_meta_prod;
@@ -529,10 +504,6 @@ static void xenvif_add_frag_responses(struct xenvif *vif, int status,
        }
 }
 
-struct skb_cb_overlay {
-       int meta_slots_used;
-};
-
 static void xenvif_kick_thread(struct xenvif *vif)
 {
        wake_up(&vif->wq);
@@ -563,19 +534,26 @@ void xenvif_rx_action(struct xenvif *vif)
        count = 0;
 
        while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) {
+               RING_IDX old_rx_req_cons;
+
                vif = netdev_priv(skb->dev);
                nr_frags = skb_shinfo(skb)->nr_frags;
 
+               old_rx_req_cons = vif->rx.req_cons;
                sco = (struct skb_cb_overlay *)skb->cb;
                sco->meta_slots_used = xenvif_gop_skb(skb, &npo);
 
-               count += nr_frags + 1;
+               count += vif->rx.req_cons - old_rx_req_cons;
 
                __skb_queue_tail(&rxq, skb);
 
+               skb = skb_peek(&vif->rx_queue);
+               if (skb == NULL)
+                       break;
+               sco = (struct skb_cb_overlay *)skb->cb;
+
                /* Filled the batch queue? */
-               /* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
-               if (count + MAX_SKB_FRAGS >= XEN_NETIF_RX_RING_SIZE)
+               if (count + sco->peek_slots_count >= XEN_NETIF_RX_RING_SIZE)
                        break;
        }
 
index a53782ef154078717cbcff039a010e93e559fa58..b45bce20ad7624421b7500cff3ccfc5a49f9204f 100644 (file)
 struct backend_info {
        struct xenbus_device *dev;
        struct xenvif *vif;
+
+       /* This is the state that will be reflected in xenstore when any
+        * active hotplug script completes.
+        */
+       enum xenbus_state state;
+
        enum xenbus_state frontend_state;
        struct xenbus_watch hotplug_status_watch;
        u8 have_hotplug_status_watch:1;
@@ -136,6 +142,8 @@ static int netback_probe(struct xenbus_device *dev,
        if (err)
                goto fail;
 
+       be->state = XenbusStateInitWait;
+
        /* This kicks hotplug scripts, so do it immediately. */
        backend_create_xenvif(be);
 
@@ -208,24 +216,113 @@ static void backend_create_xenvif(struct backend_info *be)
        kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE);
 }
 
-
-static void disconnect_backend(struct xenbus_device *dev)
+static void backend_disconnect(struct backend_info *be)
 {
-       struct backend_info *be = dev_get_drvdata(&dev->dev);
-
        if (be->vif)
                xenvif_disconnect(be->vif);
 }
 
-static void destroy_backend(struct xenbus_device *dev)
+static void backend_connect(struct backend_info *be)
 {
-       struct backend_info *be = dev_get_drvdata(&dev->dev);
+       if (be->vif)
+               connect(be);
+}
 
-       if (be->vif) {
-               kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
-               xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status");
-               xenvif_free(be->vif);
-               be->vif = NULL;
+static inline void backend_switch_state(struct backend_info *be,
+                                       enum xenbus_state state)
+{
+       struct xenbus_device *dev = be->dev;
+
+       pr_debug("%s -> %s\n", dev->nodename, xenbus_strstate(state));
+       be->state = state;
+
+       /* If we are waiting for a hotplug script then defer the
+        * actual xenbus state change.
+        */
+       if (!be->have_hotplug_status_watch)
+               xenbus_switch_state(dev, state);
+}
+
+/* Handle backend state transitions:
+ *
+ * The backend state starts in InitWait and the following transitions are
+ * allowed.
+ *
+ * InitWait -> Connected
+ *
+ *    ^    \         |
+ *    |     \        |
+ *    |      \       |
+ *    |       \      |
+ *    |        \     |
+ *    |         \    |
+ *    |          V   V
+ *
+ *  Closed  <-> Closing
+ *
+ * The state argument specifies the eventual state of the backend and the
+ * function transitions to that state via the shortest path.
+ */
+static void set_backend_state(struct backend_info *be,
+                             enum xenbus_state state)
+{
+       while (be->state != state) {
+               switch (be->state) {
+               case XenbusStateClosed:
+                       switch (state) {
+                       case XenbusStateInitWait:
+                       case XenbusStateConnected:
+                               pr_info("%s: prepare for reconnect\n",
+                                       be->dev->nodename);
+                               backend_switch_state(be, XenbusStateInitWait);
+                               break;
+                       case XenbusStateClosing:
+                               backend_switch_state(be, XenbusStateClosing);
+                               break;
+                       default:
+                               BUG();
+                       }
+                       break;
+               case XenbusStateInitWait:
+                       switch (state) {
+                       case XenbusStateConnected:
+                               backend_connect(be);
+                               backend_switch_state(be, XenbusStateConnected);
+                               break;
+                       case XenbusStateClosing:
+                       case XenbusStateClosed:
+                               backend_switch_state(be, XenbusStateClosing);
+                               break;
+                       default:
+                               BUG();
+                       }
+                       break;
+               case XenbusStateConnected:
+                       switch (state) {
+                       case XenbusStateInitWait:
+                       case XenbusStateClosing:
+                       case XenbusStateClosed:
+                               backend_disconnect(be);
+                               backend_switch_state(be, XenbusStateClosing);
+                               break;
+                       default:
+                               BUG();
+                       }
+                       break;
+               case XenbusStateClosing:
+                       switch (state) {
+                       case XenbusStateInitWait:
+                       case XenbusStateConnected:
+                       case XenbusStateClosed:
+                               backend_switch_state(be, XenbusStateClosed);
+                               break;
+                       default:
+                               BUG();
+                       }
+                       break;
+               default:
+                       BUG();
+               }
        }
 }
 
@@ -237,40 +334,33 @@ static void frontend_changed(struct xenbus_device *dev,
 {
        struct backend_info *be = dev_get_drvdata(&dev->dev);
 
-       pr_debug("frontend state %s\n", xenbus_strstate(frontend_state));
+       pr_debug("%s -> %s\n", dev->otherend, xenbus_strstate(frontend_state));
 
        be->frontend_state = frontend_state;
 
        switch (frontend_state) {
        case XenbusStateInitialising:
-               if (dev->state == XenbusStateClosed) {
-                       pr_info("%s: prepare for reconnect\n", dev->nodename);
-                       xenbus_switch_state(dev, XenbusStateInitWait);
-               }
+               set_backend_state(be, XenbusStateInitWait);
                break;
 
        case XenbusStateInitialised:
                break;
 
        case XenbusStateConnected:
-               if (dev->state == XenbusStateConnected)
-                       break;
-               if (be->vif)
-                       connect(be);
+               set_backend_state(be, XenbusStateConnected);
                break;
 
        case XenbusStateClosing:
-               disconnect_backend(dev);
-               xenbus_switch_state(dev, XenbusStateClosing);
+               set_backend_state(be, XenbusStateClosing);
                break;
 
        case XenbusStateClosed:
-               xenbus_switch_state(dev, XenbusStateClosed);
+               set_backend_state(be, XenbusStateClosed);
                if (xenbus_dev_is_online(dev))
                        break;
-               destroy_backend(dev);
                /* fall through if not online */
        case XenbusStateUnknown:
+               set_backend_state(be, XenbusStateClosed);
                device_unregister(&dev->dev);
                break;
 
@@ -363,7 +453,9 @@ static void hotplug_status_changed(struct xenbus_watch *watch,
        if (IS_ERR(str))
                return;
        if (len == sizeof("connected")-1 && !memcmp(str, "connected", len)) {
-               xenbus_switch_state(be->dev, XenbusStateConnected);
+               /* Complete any pending state change */
+               xenbus_switch_state(be->dev, be->state);
+
                /* Not interested in this watch anymore. */
                unregister_hotplug_status_watch(be);
        }
@@ -393,12 +485,8 @@ static void connect(struct backend_info *be)
        err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch,
                                   hotplug_status_changed,
                                   "%s/%s", dev->nodename, "hotplug-status");
-       if (err) {
-               /* Switch now, since we can't do a watch. */
-               xenbus_switch_state(dev, XenbusStateConnected);
-       } else {
+       if (!err)
                be->have_hotplug_status_watch = 1;
-       }
 
        netif_wake_queue(be->vif->dev);
 }
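
set_backend_state() above moves toward the requested state one hop at a time
along the allowed edges, so a reconnecting frontend takes
Closed -> InitWait -> Connected rather than jumping straight to Connected.
Below is a stand-alone model of just that walk; the real driver also
connects/disconnects the vif and defers the xenbus write while a hotplug
script is pending.

  #include <stdio.h>

  enum state { INIT_WAIT, CONNECTED, CLOSING, CLOSED };

  static const char *name[] = { "InitWait", "Connected", "Closing", "Closed" };

  /* One hop of the shortest-path walk, mirroring the switch above. */
  static enum state next(enum state cur, enum state target)
  {
          switch (cur) {
          case CLOSED:
                  return target == CLOSING ? CLOSING : INIT_WAIT;
          case INIT_WAIT:
                  return target == CONNECTED ? CONNECTED : CLOSING;
          case CONNECTED:
                  return CLOSING;
          case CLOSING:
                  return CLOSED;
          }
          return cur;
  }

  int main(void)
  {
          enum state cur = CLOSED, target = CONNECTED;

          printf("%s", name[cur]);
          while (cur != target) {
                  cur = next(cur, target);
                  printf(" -> %s", name[cur]);
          }
          printf("\n");   /* Closed -> InitWait -> Connected */
          return 0;
  }
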
index 36808bf256770a5e02e7674002ed389c7fd8ac19..dd1011e55cb598096ef7c2ad64981a2572b760d7 100644 (file)
@@ -952,7 +952,7 @@ static int handle_incoming_queue(struct net_device *dev,
                u64_stats_update_end(&stats->syncp);
 
                /* Pass it up. */
-               netif_receive_skb(skb);
+               napi_gro_receive(&np->napi, skb);
        }
 
        return packets_dropped;
@@ -1051,6 +1051,8 @@ err:
        if (work_done < budget) {
                int more_to_do = 0;
 
+               napi_gro_flush(napi, false);
+
                local_irq_save(flags);
 
                RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
index 7c29ee4ed0ae5559077b7b4efd69eb7ff2fd871a..b0299e6d9a3f2d1359f78454eb67e399ea6d3eb1 100644 (file)
@@ -47,6 +47,9 @@ static void pci_acpi_wake_dev(acpi_handle handle, u32 event, void *context)
        if (event != ACPI_NOTIFY_DEVICE_WAKE || !pci_dev)
                return;
 
+       if (pci_dev->pme_poll)
+               pci_dev->pme_poll = false;
+
        if (pci_dev->current_state == PCI_D3cold) {
                pci_wakeup_event(pci_dev);
                pm_runtime_resume(&pci_dev->dev);
@@ -57,9 +60,6 @@ static void pci_acpi_wake_dev(acpi_handle handle, u32 event, void *context)
        if (pci_dev->pme_support)
                pci_check_pme_status(pci_dev);
 
-       if (pci_dev->pme_poll)
-               pci_dev->pme_poll = false;
-
        pci_wakeup_event(pci_dev);
        pm_runtime_resume(&pci_dev->dev);
 
index e8ccf6c0f08a3d4d5a9b03aec428efddcba77c6f..bdd64b1b4817f7223fa31db19a118db5a9caaaa5 100644 (file)
@@ -1155,8 +1155,14 @@ static void pci_enable_bridge(struct pci_dev *dev)
 
        pci_enable_bridge(dev->bus->self);
 
-       if (pci_is_enabled(dev))
+       if (pci_is_enabled(dev)) {
+               if (!dev->is_busmaster) {
+                       dev_warn(&dev->dev, "driver skip pci_set_master, fix it!\n");
+                       pci_set_master(dev);
+               }
                return;
+       }
+
        retval = pci_enable_device(dev);
        if (retval)
                dev_err(&dev->dev, "Error enabling bridge (%d), continuing\n",
index 1a78163907734be9e1e49bd3bca6e2fc19e7b6c7..b9f2653e4ef90f315de25752fe24821a98497848 100644 (file)
@@ -709,7 +709,7 @@ static struct da9063_regulators_pdata *da9063_parse_regulators_dt(
                struct of_regulator_match **da9063_reg_matches)
 {
        da9063_reg_matches = NULL;
-       return PTR_ERR(-ENODEV);
+       return ERR_PTR(-ENODEV);
 }
 #endif
 
index 488dfe7ce9a6c048a751029451df59d6f6f2c074..7e2b165972e6edd6e3eee6b26478cb263db5d048 100644 (file)
@@ -201,13 +201,7 @@ static unsigned int palmas_smps_ramp_delay[4] = {0, 10000, 5000, 2500};
 #define SMPS_CTRL_MODE_ECO             0x02
 #define SMPS_CTRL_MODE_PWM             0x03
 
-/* These values are derived from the data sheet. And are the number of steps
- * where there is a voltage change, the ranges at beginning and end of register
- * max/min values where there are no change are ommitted.
- *
- * So they are basically (maxV-minV)/stepV
- */
-#define PALMAS_SMPS_NUM_VOLTAGES       117
+#define PALMAS_SMPS_NUM_VOLTAGES       122
 #define PALMAS_SMPS10_NUM_VOLTAGES     2
 #define PALMAS_LDO_NUM_VOLTAGES                50
 
@@ -979,6 +973,7 @@ static int palmas_regulators_probe(struct platform_device *pdev)
                        pmic->desc[id].min_uV = 900000;
                        pmic->desc[id].uV_step = 50000;
                        pmic->desc[id].linear_min_sel = 1;
+                       pmic->desc[id].enable_time = 500;
                        pmic->desc[id].vsel_reg =
                                        PALMAS_BASE_TO_REG(PALMAS_LDO_BASE,
                                                palmas_regs_info[id].vsel_addr);
@@ -997,6 +992,11 @@ static int palmas_regulators_probe(struct platform_device *pdev)
                                pmic->desc[id].min_uV = 450000;
                                pmic->desc[id].uV_step = 25000;
                        }
+
+                       /* LDO6 in vibrator mode uses an enable time of 2000us */
+                       if (pdata && pdata->ldo6_vibrator &&
+                               (id == PALMAS_REG_LDO6))
+                               pmic->desc[id].enable_time = 2000;
                } else {
                        pmic->desc[id].n_voltages = 1;
                        pmic->desc[id].ops = &palmas_ops_extreg;
index d8e3e1262bc2960b4eebf89b5c552ef6e04df305..20c271d49dcbbd358e03d5140d05e9da4ca54952 100644 (file)
@@ -279,8 +279,12 @@ static int ti_abb_set_opp(struct regulator_dev *rdev, struct ti_abb *abb,
        ti_abb_rmw(regs->opp_sel_mask, info->opp_sel, regs->control_reg,
                   abb->base);
 
-       /* program LDO VBB vset override if needed */
-       if (abb->ldo_base)
+       /*
+        * program LDO VBB vset override if needed for !bypass mode
+        * XXX: Do not switch sequence - for !bypass, LDO override reset *must*
+        * be performed *before* switch to bias mode else VBB glitches.
+        */
+       if (abb->ldo_base && info->opp_sel != TI_ABB_NOMINAL_OPP)
                ti_abb_program_ldovbb(dev, abb, info);
 
        /* Initiate ABB ldo change */
@@ -295,6 +299,14 @@ static int ti_abb_set_opp(struct regulator_dev *rdev, struct ti_abb *abb,
        if (ret)
                goto out;
 
+       /*
+        * Reset LDO VBB vset override bypass mode
+        * XXX: Do not switch sequence - for bypass, LDO override reset *must*
+        * be performed *after* switch to bypass else VBB glitches.
+        */
+       if (abb->ldo_base && info->opp_sel == TI_ABB_NOMINAL_OPP)
+               ti_abb_program_ldovbb(dev, abb, info);
+
 out:
        return ret;
 }
index 1432b26ef2e97b0830a2ecf973789ee20b7991cc..2205fbc2c37b4cf9b917bafa5d1f583f7e767b49 100644 (file)
@@ -63,7 +63,7 @@ static irqreturn_t wm831x_ldo_uv_irq(int irq, void *data)
  */
 
 static const struct regulator_linear_range wm831x_gp_ldo_ranges[] = {
-       { .min_uV =  900000, .max_uV = 1650000, .min_sel =  0, .max_sel = 14,
+       { .min_uV =  900000, .max_uV = 1600000, .min_sel =  0, .max_sel = 14,
          .uV_step =  50000 },
        { .min_uV = 1700000, .max_uV = 3300000, .min_sel = 15, .max_sel = 31,
          .uV_step = 100000 },
@@ -332,7 +332,7 @@ static struct platform_driver wm831x_gp_ldo_driver = {
  */
 
 static const struct regulator_linear_range wm831x_aldo_ranges[] = {
-       { .min_uV = 1000000, .max_uV = 1650000, .min_sel =  0, .max_sel = 12,
+       { .min_uV = 1000000, .max_uV = 1600000, .min_sel =  0, .max_sel = 12,
          .uV_step =  50000 },
        { .min_uV = 1700000, .max_uV = 3500000, .min_sel = 13, .max_sel = 31,
          .uV_step = 100000 },
index 835b5f0f344ed2537115b1ed486d14860f88434e..61ca9292a42944229f9f52bc6f5b8943b6414b22 100644 (file)
@@ -543,7 +543,7 @@ static int wm8350_dcdc_set_suspend_mode(struct regulator_dev *rdev,
 }
 
 static const struct regulator_linear_range wm8350_ldo_ranges[] = {
-       { .min_uV =  900000, .max_uV = 1750000, .min_sel =  0, .max_sel = 15,
+       { .min_uV =  900000, .max_uV = 1650000, .min_sel =  0, .max_sel = 15,
          .uV_step =  50000 },
        { .min_uV = 1800000, .max_uV = 3300000, .min_sel = 16, .max_sel = 31,
          .uV_step = 100000 },
index a84aab47a11315b137d370a9337a8f5d722bb644..f73287eab373a92b7f1a9876c3e53eb7f2cee327 100644 (file)
@@ -96,6 +96,15 @@ config COMEDI_SKEL
          To compile this driver as a module, choose M here: the module will be
          called skel.
 
+config COMEDI_SSV_DNP
+       tristate "SSV Embedded Systems DIL/Net-PC support"
+       depends on X86_32 || COMPILE_TEST
+       ---help---
+         Enable support for SSV Embedded Systems DIL/Net-PC
+
+         To compile this driver as a module, choose M here: the module will be
+         called ssv_dnp.
+
 endif # COMEDI_MISC_DRIVERS
 
 menuconfig COMEDI_ISA_DRIVERS
@@ -386,6 +395,14 @@ config COMEDI_DMM32AT
          To compile this driver as a module, choose M here: the module will be
          called dmm32at.
 
+config COMEDI_UNIOXX5
+       tristate "Fastwel UNIOxx-5 analog and digital io board support"
+       ---help---
+         Enable support for Fastwel UNIOxx-5 (analog and digital i/o) boards
+
+         To compile this driver as a module, choose M here: the module will be
+         called unioxx5.
+
 config COMEDI_FL512
        tristate "FL512 ISA card support"
        ---help---
@@ -855,14 +872,6 @@ config COMEDI_DYNA_PCI10XX
          To compile this driver as a module, choose M here: the module will be
          called dyna_pci10xx.
 
-config COMEDI_UNIOXX5
-       tristate "Fastwel UNIOxx-5 analog and digital io board support"
-       ---help---
-         Enable support for Fastwel UNIOxx-5 (analog and digital i/o) boards
-
-         To compile this driver as a module, choose M here: the module will be
-         called unioxx5.
-
 config COMEDI_GSC_HPDI
        tristate "General Standards PCI-HPDI32 / PMC-HPDI32 support"
        select COMEDI_FC
@@ -1085,14 +1094,6 @@ config COMEDI_S626
          To compile this driver as a module, choose M here: the module will be
          called s626.
 
-config COMEDI_SSV_DNP
-       tristate "SSV Embedded Systems DIL/Net-PC support"
-       ---help---
-         Enable support for SSV Embedded Systems DIL/Net-PC
-
-         To compile this driver as a module, choose M here: the module will be
-         called ssv_dnp.
-
 config COMEDI_MITE
        depends on HAS_DMA
        tristate
index 724a685753dd8671fba84c9f3e14174164696e64..40ef785a04284d9a4c79633f2877a957b8c405dc 100644 (file)
@@ -474,7 +474,7 @@ static void dgap_cleanup_board(struct board_t *brd)
 
                 DGAP_LOCK(dgap_global_lock, flags);
                 brd->msgbuf = NULL;
-                printk(brd->msgbuf_head);
+                printk("%s", brd->msgbuf_head);
                 kfree(brd->msgbuf_head);
                 brd->msgbuf_head = NULL;
                 DGAP_UNLOCK(dgap_global_lock, flags);
@@ -628,7 +628,7 @@ static int dgap_found_board(struct pci_dev *pdev, int id)
        DPR_INIT(("dgap_scan(%d) - printing out the msgbuf\n", i));
        DGAP_LOCK(dgap_global_lock, flags);
        brd->msgbuf = NULL;
-       printk(brd->msgbuf_head);
+       printk("%s", brd->msgbuf_head);
        kfree(brd->msgbuf_head);
        brd->msgbuf_head = NULL;
        DGAP_UNLOCK(dgap_global_lock, flags);
@@ -955,25 +955,28 @@ static void dgap_mbuf(struct board_t *brd, const char *fmt, ...) {
        char            buf[1024];
        int             i;
        unsigned long   flags;
+       size_t          length;
 
        DGAP_LOCK(dgap_global_lock, flags);
 
        /* Format buf using fmt and arguments contained in ap. */
        va_start(ap, fmt);
-       i = vsprintf(buf, fmt,  ap);
+       i = vsnprintf(buf, sizeof(buf), fmt,  ap);
        va_end(ap);
 
        DPR((buf));
 
        if (!brd || !brd->msgbuf) {
-               printk(buf);
+               printk("%s", buf);
                DGAP_UNLOCK(dgap_global_lock, flags);
                return;
        }
 
-       memcpy(brd->msgbuf, buf, strlen(buf));
-       brd->msgbuf += strlen(buf);
-       *brd->msgbuf = 0;
+       length = strlen(buf) + 1;
+       if (brd->msgbuf - brd->msgbuf_head < length)
+               length = brd->msgbuf - brd->msgbuf_head;
+       memcpy(brd->msgbuf, buf, length);
+       brd->msgbuf += length;
 
        DGAP_UNLOCK(dgap_global_lock, flags);
 }
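
The dgap_mbuf() change above swaps the unbounded vsprintf()/memcpy() for
vsnprintf() plus a length clamp against the staging buffer. The same
bounded-append pattern in stand-alone form (buffer sizes and names here are
invented for the example, and the bookkeeping simply tracks the space left):

  #include <stdarg.h>
  #include <stdio.h>
  #include <string.h>

  #define MSGBUF_SIZE 256

  static char msgbuf_head[MSGBUF_SIZE];
  static char *msgbuf = msgbuf_head;

  /* Format into a bounded scratch buffer, then copy no more than the space
   * remaining in the message buffer, keeping it NUL-terminated. */
  static void mbuf_append(const char *fmt, ...)
  {
          char buf[128];
          size_t length, avail;
          va_list ap;

          va_start(ap, fmt);
          vsnprintf(buf, sizeof(buf), fmt, ap);
          va_end(ap);

          avail = MSGBUF_SIZE - (size_t)(msgbuf - msgbuf_head);
          if (avail == 0)
                  return;                         /* buffer already full */
          length = strlen(buf) + 1;               /* include the NUL */
          if (length > avail) {
                  length = avail;
                  buf[length - 1] = '\0';         /* keep it terminated */
          }
          memcpy(msgbuf, buf, length);
          msgbuf += length - 1;                   /* stay on the NUL */
  }

  int main(void)
  {
          mbuf_append("board %d: ", 0);
          mbuf_append("%s", "probe ok");
          printf("%s\n", msgbuf_head);            /* board 0: probe ok */
          return 0;
  }
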
index f8c1e22585d6cf81f3a4a52262b73608ab68f333..71d2b83cc3a12e365823df2e40fd176ac26120ef 100644 (file)
@@ -454,7 +454,7 @@ static void dgnc_cleanup_board(struct board_t *brd)
 
                DGNC_LOCK(dgnc_global_lock, flags);
                brd->msgbuf = NULL;
-               printk(brd->msgbuf_head);
+               printk("%s", brd->msgbuf_head);
                kfree(brd->msgbuf_head);
                brd->msgbuf_head = NULL;
                DGNC_UNLOCK(dgnc_global_lock, flags);
@@ -710,7 +710,7 @@ static int dgnc_found_board(struct pci_dev *pdev, int id)
        DPR_INIT(("dgnc_scan(%d) - printing out the msgbuf\n", i));
        DGNC_LOCK(dgnc_global_lock, flags);
        brd->msgbuf = NULL;
-       printk(brd->msgbuf_head);
+       printk("%s", brd->msgbuf_head);
        kfree(brd->msgbuf_head);
        brd->msgbuf_head = NULL;
        DGNC_UNLOCK(dgnc_global_lock, flags);
index db4d6dc032432cf0d92ff71d6c38e52dff17ce13..b36feb080cba886f8ebf9a456c519247376dbdcb 100644 (file)
@@ -37,7 +37,7 @@ config IIO_SIMPLE_DUMMY_EVENTS
 
 config IIO_SIMPLE_DUMMY_BUFFER
        boolean "Buffered capture support"
-       depends on IIO_KFIFO_BUF
+       select IIO_KFIFO_BUF
        help
          Add buffered data capture to the simple dummy driver.
 
index 351936c3efd698e8c5cbe76d7878eb8ad16c311f..e4998e4d4434b2af9365aacc1a8c4137a4c70544 100644 (file)
@@ -563,6 +563,7 @@ static int isl29018_probe(struct i2c_client *client,
        mutex_init(&chip->lock);
 
        chip->lux_scale = 1;
+       chip->lux_uscale = 0;
        chip->range = 1000;
        chip->adc_bit = 16;
        chip->suspended = false;
index d2748c329eae6728b9c88c6b3a63065982d78555..c3f3f539e787e926b54f793958d8e9dac13aa6d7 100644 (file)
@@ -229,7 +229,7 @@ static int hmc5843_read_measurement(struct iio_dev *indio_dev,
        if (result < 0)
                return -EINVAL;
 
-       *val = result;
+       *val = sign_extend32(result, 15);
        return IIO_VAL_INT;
 }
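
The hmc5843 read fix above runs the raw 16-bit register value through
sign_extend32(result, 15) so that negative field readings come back as
negative integers instead of large positive ones. The equivalent operation
outside the kernel (sign_extend32() itself is a kernel helper; the cast below
only illustrates what it does for a 16-bit sample):

  #include <stdint.h>
  #include <stdio.h>

  /* Treat bit 15 of the raw sample as the sign bit, like
   * sign_extend32(value, 15) does for this driver. */
  static int32_t sign_extend16(uint32_t raw)
  {
          return (int32_t)(int16_t)(raw & 0xffff);
  }

  int main(void)
  {
          uint32_t raw = 0xfff6;          /* a small negative reading */

          printf("raw %u -> %d\n", (unsigned int)raw, sign_extend16(raw));
          /* prints: raw 65526 -> -10 */
          return 0;
  }
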
 
index a802cf2491d61a6d55f95c6e6825792905fa118a..4c6d2041260bed259f7cba174c64ceb9a9c1a3f8 100644 (file)
@@ -299,7 +299,7 @@ static int ade7854_spi_probe(struct spi_device *spi)
        if (ret)
                iio_device_free(indio_dev);
 
-       return 0;
+       return ret;
 }
 
 static int ade7854_spi_remove(struct spi_device *spi)
index 47c5888461ffbeb3f4aa994af5b2418df6506e60..a2e52a0c53c981690dc7b711a1a97625c01a9a0f 100644 (file)
@@ -41,7 +41,6 @@ struct imx_drm_device {
        struct list_head                        encoder_list;
        struct list_head                        connector_list;
        struct mutex                            mutex;
-       int                                     references;
        int                                     pipes;
        struct drm_fbdev_cma                    *fbhelper;
 };
@@ -241,8 +240,6 @@ struct drm_device *imx_drm_device_get(void)
                }
        }
 
-       imxdrm->references++;
-
        return imxdrm->drm;
 
 unwind_crtc:
@@ -280,8 +277,6 @@ void imx_drm_device_put(void)
        list_for_each_entry(enc, &imxdrm->encoder_list, list)
                module_put(enc->owner);
 
-       imxdrm->references--;
-
        mutex_unlock(&imxdrm->mutex);
 }
 EXPORT_SYMBOL_GPL(imx_drm_device_put);
@@ -485,7 +480,7 @@ int imx_drm_add_crtc(struct drm_crtc *crtc,
 
        mutex_lock(&imxdrm->mutex);
 
-       if (imxdrm->references) {
+       if (imxdrm->drm->open_count) {
                ret = -EBUSY;
                goto err_busy;
        }
@@ -564,7 +559,7 @@ int imx_drm_add_encoder(struct drm_encoder *encoder,
 
        mutex_lock(&imxdrm->mutex);
 
-       if (imxdrm->references) {
+       if (imxdrm->drm->open_count) {
                ret = -EBUSY;
                goto err_busy;
        }
@@ -709,7 +704,7 @@ int imx_drm_add_connector(struct drm_connector *connector,
 
        mutex_lock(&imxdrm->mutex);
 
-       if (imxdrm->references) {
+       if (imxdrm->drm->open_count) {
                ret = -EBUSY;
                goto err_busy;
        }
index 2f44d56700af1eabe13625777b3cc3ce8c94cf20..776d3632dc7d52988738b105d648161b2e99b9a2 100644 (file)
@@ -244,13 +244,17 @@ static int snd_toneport_source_put(struct snd_kcontrol *kcontrol,
        struct snd_line6_pcm *line6pcm = snd_kcontrol_chip(kcontrol);
        struct usb_line6_toneport *toneport =
            (struct usb_line6_toneport *)line6pcm->line6;
+       unsigned int source;
 
-       if (ucontrol->value.enumerated.item[0] == toneport->source)
+       source = ucontrol->value.enumerated.item[0];
+       if (source >= ARRAY_SIZE(toneport_source_info))
+               return -EINVAL;
+       if (source == toneport->source)
                return 0;
 
-       toneport->source = ucontrol->value.enumerated.item[0];
+       toneport->source = source;
        toneport_send_cmd(toneport->line6.usbdev,
-                         toneport_source_info[toneport->source].code, 0x0000);
+                         toneport_source_info[source].code, 0x0000);
        return 1;
 }
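
The toneport control fix above checks the enumerated value coming from user
space against the size of the source table before it is used as an index.
A minimal stand-alone form of the same guard (table contents and names here
are invented, not the driver's):

  #include <stdio.h>

  #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

  /* Invented stand-in for the source table: index -> command code. */
  static const unsigned short source_code[] = { 0x0000, 0x0080, 0x0089, 0x0093 };

  static int set_source(unsigned int source)
  {
          if (source >= ARRAY_SIZE(source_code))
                  return -1;              /* -EINVAL in the driver */
          printf("sending code 0x%04x\n", source_code[source]);
          return 1;
  }

  int main(void)
  {
          set_source(2);                  /* valid: sends 0x0089 */
          set_source(42);                 /* rejected before indexing */
          return 0;
  }
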
 
index 086ca3d7241b2b756a650bc647ab9450461f1a4a..26b49a24b3dfe32997e7d37cb479f2d98b91ce82 100644 (file)
@@ -1802,7 +1802,7 @@ kiblnd_recv (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
 int
 kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name)
 {
-       struct task_struct *task = kthread_run(fn, arg, name);
+       struct task_struct *task = kthread_run(fn, arg, "%s", name);
 
        if (IS_ERR(task))
                return PTR_ERR(task);
index 2c581b7fa8adee363d19791007f4696e9e439d06..68a4f52ec998c14795d6f356e807b798c2dfa794 100644 (file)
@@ -1005,7 +1005,7 @@ ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
 int
 ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name)
 {
-       struct task_struct *task = kthread_run(fn, arg, name);
+       struct task_struct *task = kthread_run(fn, arg, "%s", name);
 
        if (IS_ERR(task))
                return PTR_ERR(task);
index 4e898e4918605bf058554d6cae15b6a5e31e550a..2156a44d07409c0501cba726ccb33ee99d1d9c92 100644 (file)
@@ -1,6 +1,6 @@
 config LUSTRE_FS
        tristate "Lustre file system client support"
-       depends on INET && m
+       depends on INET && m && !MIPS && !XTENSA && !SUPERH
        select LNET
        select CRYPTO
        select CRYPTO_CRC32
@@ -52,7 +52,7 @@ config LUSTRE_DEBUG_EXPENSIVE_CHECK
 config LUSTRE_TRANSLATE_ERRNOS
        bool
        depends on LUSTRE_FS && !X86
-       default true
+       default y
 
 config LUSTRE_LLITE_LLOOP
        bool "Lustre virtual block device"
index 3916bda3004cf23c7fa58c7e51de5e621909cfc5..a100a0b96381d6ef0a55242c969ab41b7adb05be 100644 (file)
@@ -800,9 +800,9 @@ static int ldlm_bl_thread_start(struct ldlm_bl_pool *blp)
 
        init_completion(&bltd.bltd_comp);
        bltd.bltd_num = atomic_read(&blp->blp_num_threads);
-       snprintf(bltd.bltd_name, sizeof(bltd.bltd_name) - 1,
+       snprintf(bltd.bltd_name, sizeof(bltd.bltd_name),
                "ldlm_bl_%02d", bltd.bltd_num);
-       task = kthread_run(ldlm_bl_thread_main, &bltd, bltd.bltd_name);
+       task = kthread_run(ldlm_bl_thread_main, &bltd, "%s", bltd.bltd_name);
        if (IS_ERR(task)) {
                CERROR("cannot start LDLM thread ldlm_bl_%02d: rc %ld\n",
                       atomic_read(&blp->blp_num_threads), PTR_ERR(task));
index 462172d1a7569a74c1ffbae3dd0c0f133574aa2e..1a55c81892e0e8f692c167d17f0296814ad806dc 100644 (file)
@@ -397,7 +397,7 @@ cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab,
                                 sched->ws_name, sched->ws_nthreads);
                }
 
-               task = kthread_run(cfs_wi_scheduler, sched, name);
+               task = kthread_run(cfs_wi_scheduler, sched, "%s", name);
                if (!IS_ERR(task)) {
                        nthrs--;
                        continue;
index 2644edf438c1e175b016a54e96b6d93bd5852cc7..c8b43442dc74be8c8cf1ee3f5b38470f63617e71 100644 (file)
@@ -1387,7 +1387,7 @@ echo_copyout_lsm (struct lov_stripe_md *lsm, void *_ulsm, int ulsm_nob)
        if (nob > ulsm_nob)
                return (-EINVAL);
 
-       if (copy_to_user (ulsm, lsm, sizeof(ulsm)))
+       if (copy_to_user (ulsm, lsm, sizeof(*ulsm)))
                return (-EFAULT);
 
        for (i = 0; i < lsm->lsm_stripe_count; i++) {
index 227a0ae9593bc987d6bfbcf1757bc0a9eacf2288..5dec771d70eee8c08a6bc0b787f7f116ddd6906d 100644 (file)
@@ -383,8 +383,8 @@ int ptlrpc_start_pinger(void)
 
        /* CLONE_VM and CLONE_FILES just avoid a needless copy, because we
         * just drop the VM and FILES in cfs_daemonize_ctxt() right away. */
-       rc = PTR_ERR(kthread_run(ptlrpc_pinger_main,
-                                &pinger_thread, pinger_thread.t_name));
+       rc = PTR_ERR(kthread_run(ptlrpc_pinger_main, &pinger_thread,
+                                "%s", pinger_thread.t_name));
        if (IS_ERR_VALUE(rc)) {
                CERROR("cannot start thread: %d\n", rc);
                return rc;
index fbdeff65d059df66f057cec95f729a5d3ad6ad63..89c9be96f454a57c4dd0839cf92f378883dce91e 100644 (file)
@@ -615,7 +615,7 @@ int ptlrpcd_start(int index, int max, const char *name, struct ptlrpcd_ctl *pc)
        init_completion(&pc->pc_starting);
        init_completion(&pc->pc_finishing);
        spin_lock_init(&pc->pc_lock);
-       strncpy(pc->pc_name, name, sizeof(pc->pc_name) - 1);
+       strlcpy(pc->pc_name, name, sizeof(pc->pc_name));
        pc->pc_set = ptlrpc_prep_set();
        if (pc->pc_set == NULL)
                GOTO(out, rc = -ENOMEM);
@@ -638,7 +638,7 @@ int ptlrpcd_start(int index, int max, const char *name, struct ptlrpcd_ctl *pc)
                                GOTO(out, rc);
                }
 
-               task = kthread_run(ptlrpcd, pc, pc->pc_name);
+               task = kthread_run(ptlrpcd, pc, "%s", pc->pc_name);
                if (IS_ERR(task))
                        GOTO(out, rc = PTR_ERR(task));
 
@@ -745,7 +745,7 @@ static int ptlrpcd_init(void)
        if (ptlrpcds == NULL)
                GOTO(out, rc = -ENOMEM);
 
-       snprintf(name, 15, "ptlrpcd_rcv");
+       snprintf(name, sizeof(name), "ptlrpcd_rcv");
        set_bit(LIOD_RECOVERY, &ptlrpcds->pd_thread_rcv.pc_flags);
        rc = ptlrpcd_start(-1, nthreads, name, &ptlrpcds->pd_thread_rcv);
        if (rc < 0)
@@ -764,7 +764,7 @@ static int ptlrpcd_init(void)
         *      unnecessary dependency. But how to distribute async RPCs load
         *      among all the ptlrpc daemons becomes another trouble. */
        for (i = 0; i < nthreads; i++) {
-               snprintf(name, 15, "ptlrpcd_%d", i);
+               snprintf(name, sizeof(name), "ptlrpcd_%d", i);
                rc = ptlrpcd_start(i, nthreads, name, &ptlrpcds->pd_threads[i]);
                if (rc < 0)
                        GOTO(out, rc);
index e90c8fb7da6a73643531c1cd1914d5826a280c2d..6547f46a7729f2f95c16f9bdff5e334a43806eea 100644 (file)
@@ -59,8 +59,8 @@
  ****************************************/
 
 
-#define PTRS_PER_PAGE   (PAGE_CACHE_SIZE / sizeof(void *))
-#define PAGES_PER_POOL  (PTRS_PER_PAGE)
+#define POINTERS_PER_PAGE      (PAGE_CACHE_SIZE / sizeof(void *))
+#define PAGES_PER_POOL         (POINTERS_PER_PAGE)
 
 #define IDLE_IDX_MAX       (100)
 #define IDLE_IDX_WEIGHT         (3)
index ac8b5fd2300b5720e322137079895bdda5f71b52..acf75f3873d1ffb0ab9cc4258150b3a9a1b2e3ba 100644 (file)
@@ -2718,15 +2718,15 @@ int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait)
        spin_unlock(&svcpt->scp_lock);
 
        if (svcpt->scp_cpt >= 0) {
-               snprintf(thread->t_name, PTLRPC_THR_NAME_LEN, "%s%02d_%03d",
+               snprintf(thread->t_name, sizeof(thread->t_name), "%s%02d_%03d",
                         svc->srv_thread_name, svcpt->scp_cpt, thread->t_id);
        } else {
-               snprintf(thread->t_name, PTLRPC_THR_NAME_LEN, "%s_%04d",
+               snprintf(thread->t_name, sizeof(thread->t_name), "%s_%04d",
                         svc->srv_thread_name, thread->t_id);
        }
 
        CDEBUG(D_RPCTRACE, "starting thread '%s'\n", thread->t_name);
-       rc = PTR_ERR(kthread_run(ptlrpc_main, thread, thread->t_name));
+       rc = PTR_ERR(kthread_run(ptlrpc_main, thread, "%s", thread->t_name));
        if (IS_ERR_VALUE(rc)) {
                CERROR("cannot start thread '%s': rc %d\n",
                       thread->t_name, rc);
index d7b3c82b5ead023bcc4ba85acdd0e307b0eecdc9..45dfe94199ae4ecd951a8142fb6f9140d226942c 100644 (file)
@@ -604,7 +604,7 @@ int cvmx_usb_initialize(struct cvmx_usb_state *state, int usb_port_number,
                        }
        }
 
-       memset(usb, 0, sizeof(usb));
+       memset(usb, 0, sizeof(*usb));
        usb->init_flags = flags;
 
        /* Initialize the USB state structure */
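
This memset() change, like the copy_to_user() and _rtw_memset() fixes
elsewhere in this series, corrects the usual sizeof(pointer) versus
sizeof(*pointer) slip: the buggy form clears only the size of the pointer
itself. A short demonstration with an invented struct:

  #include <stdio.h>
  #include <string.h>

  struct usb_state {                      /* invented struct for the demo */
          int  init_flags;
          char scratch[120];
  };

  int main(void)
  {
          struct usb_state s, *usb = &s;

          memset(&s, 0xff, sizeof(s));            /* poison everything        */
          memset(usb, 0, sizeof(usb));            /* BUG: clears 4 or 8 bytes */
          printf("sizeof(usb)=%zu sizeof(*usb)=%zu scratch[100]=0x%02x\n",
                 sizeof(usb), sizeof(*usb), (unsigned char)usb->scratch[100]);

          memset(usb, 0, sizeof(*usb));           /* fixed: clears the struct */
          printf("after the fix: scratch[100]=0x%02x\n",
                 (unsigned char)usb->scratch[100]);
          return 0;
  }
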
index 3605c5da822d953daa6275dda23b675e13b60d5c..6fc77428e83a0c389c51e6cca52ceaa6a8ff7dd9 100644 (file)
@@ -157,8 +157,8 @@ _func_enter_;
 
        *frlen = *frlen + (len + 2);
 
-       return pbuf + len + 2;
 _func_exit_;
+       return pbuf + len + 2;
 }
 
 inline u8 *rtw_set_ie_ch_switch (u8 *buf, u32 *buf_len, u8 ch_switch_mode,
index 8b2ba26ba38d9a44c090167cc9bd862367e92ade..4b2eb8e9b5620ab4529ec9ce8af252ef502a65fc 100644 (file)
@@ -1827,13 +1827,13 @@ unsigned int OnAction_back(struct adapter *padapter, union recv_frame *precv_fra
 
 #ifdef CONFIG_88EU_P2P
 
-static int get_reg_classes_full_count(struct p2p_channels channel_list)
+static int get_reg_classes_full_count(struct p2p_channels *channel_list)
 {
        int cnt = 0;
        int i;
 
-       for (i = 0; i < channel_list.reg_classes; i++) {
-               cnt += channel_list.reg_class[i].channels;
+       for (i = 0; i < channel_list->reg_classes; i++) {
+               cnt += channel_list->reg_class[i].channels;
        }
 
        return cnt;
@@ -2065,7 +2065,7 @@ void issue_p2p_GO_request(struct adapter *padapter, u8 *raddr)
        /*  + number of channels in all classes */
        len_channellist_attr = 3
           + (1 + 1) * (u16)(pmlmeext->channel_list.reg_classes)
-          + get_reg_classes_full_count(pmlmeext->channel_list);
+          + get_reg_classes_full_count(&pmlmeext->channel_list);
 
        *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(len_channellist_attr);
        p2pielen += 2;
@@ -2437,7 +2437,7 @@ static void issue_p2p_GO_response(struct adapter *padapter, u8 *raddr, u8 *frame
        /*  + number of channels in all classes */
        len_channellist_attr = 3
           + (1 + 1) * (u16)pmlmeext->channel_list.reg_classes
-          + get_reg_classes_full_count(pmlmeext->channel_list);
+          + get_reg_classes_full_count(&pmlmeext->channel_list);
 
        *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(len_channellist_attr);
 
@@ -2859,7 +2859,7 @@ void issue_p2p_invitation_request(struct adapter *padapter, u8 *raddr)
        /*  + number of channels in all classes */
        len_channellist_attr = 3
           + (1 + 1) * (u16)pmlmeext->channel_list.reg_classes
-          + get_reg_classes_full_count(pmlmeext->channel_list);
+          + get_reg_classes_full_count(&pmlmeext->channel_list);
 
        *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(len_channellist_attr);
 
@@ -3120,7 +3120,7 @@ void issue_p2p_invitation_response(struct adapter *padapter, u8 *raddr, u8 dialo
                /*  + number of channels in all classes */
                len_channellist_attr = 3
                        + (1 + 1) * (u16)pmlmeext->channel_list.reg_classes
-                       + get_reg_classes_full_count(pmlmeext->channel_list);
+                       + get_reg_classes_full_count(&pmlmeext->channel_list);
 
                *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(len_channellist_attr);
                p2pielen += 2;
index c7ff2e4d1f23fcc4567c747bf97beff945886862..9832dcbbd07fa621467dfc1c216411e627db7e66 100644 (file)
@@ -907,7 +907,7 @@ u32 mp_query_psd(struct adapter *pAdapter, u8 *data)
                sscanf(data, "pts =%d, start =%d, stop =%d", &psd_pts, &psd_start, &psd_stop);
        }
 
-       _rtw_memset(data, '\0', sizeof(data));
+       _rtw_memset(data, '\0', sizeof(*data));
 
        i = psd_start;
        while (i < psd_stop) {
index 013ea487e7acc2e57726b82e6e016d4aebf60b34..8018edd3d42e19fcc0875c7324584d94e6fb02ad 100644 (file)
@@ -631,7 +631,7 @@ void WMMOnAssocRsp(struct adapter *padapter)
        inx[0] = 0; inx[1] = 1; inx[2] = 2; inx[3] = 3;
 
        if (pregpriv->wifi_spec == 1) {
-               u32     j, tmp, change_inx;
+               u32     j, tmp, change_inx = false;
 
                /* entry indx: 0->vo, 1->vi, 2->be, 3->bk. */
                for (i = 0; i < 4; i++) {
index 9c2e7a20c09ea1ced218d303db5e1213837be751..ec0028d4e61a495cf577efe82fd7bffa0455fbbd 100644 (file)
@@ -57,7 +57,7 @@ static void Init_ODM_ComInfo_88E(struct adapter *Adapter)
        u8 cut_ver, fab_ver;
 
        /*  Init Value */
-       _rtw_memset(dm_odm, 0, sizeof(dm_odm));
+       _rtw_memset(dm_odm, 0, sizeof(*dm_odm));
 
        dm_odm->Adapter = Adapter;
 
index 2bfe72841921bd8fc76ec684130f691e5466e50a..4787bacdcad892da62bf66c05cadb851e090e09d 100644 (file)
@@ -1010,7 +1010,7 @@ enum dm_dig_op {
 #define                DM_false_ALARM_THRESH_LOW       400
 #define                DM_false_ALARM_THRESH_HIGH      1000
 
-#define                DM_DIG_MAX_NIC                  0x3e
+#define                DM_DIG_MAX_NIC                  0x4e
 #define                DM_DIG_MIN_NIC                  0x1e /* 0x22/0x1c */
 
 #define                DM_DIG_MAX_AP                   0x32
index 52b280165a926da8acccb981e539c787c6c422b9..555c801d2ded7e434a5a86335449d5be377796ee 100644 (file)
@@ -188,7 +188,7 @@ enum ChannelPlan {
 
 struct txpowerinfo24g {
        u8 IndexCCK_Base[MAX_RF_PATH][MAX_CHNL_GROUP_24G];
-       u8 IndexBW40_Base[MAX_RF_PATH][MAX_CHNL_GROUP_24G-1];
+       u8 IndexBW40_Base[MAX_RF_PATH][MAX_CHNL_GROUP_24G];
        /* If only one tx, only BW20 and OFDM are used. */
        s8 CCK_Diff[MAX_RF_PATH][MAX_TX_COUNT];
        s8 OFDM_Diff[MAX_RF_PATH][MAX_TX_COUNT];
index a96b018e5e6a3d7208867684983a58ede36c8e82..853ab80a2b860a0a95bfab8d85ba064ca4eef982 100644 (file)
@@ -870,6 +870,7 @@ static struct fwevent wlanevents[] = {
        {0, NULL},
        {0, NULL},
        {0, &rtw_cpwm_event_callback},
+       {0, NULL},
 };
 
 #endif/* _RTL_MLME_EXT_C_ */
index cd4100fb3645ffd1cc2fcb8ec1013c1fa76d30da..95953ebc027922e66ffd3168a965dee33f6a30be 100644 (file)
@@ -6973,7 +6973,7 @@ static int rtw_mp_ctx(struct net_device *dev,
        stop = strncmp(extra, "stop", 4);
        sscanf(extra, "count =%d, pkt", &count);
 
-       _rtw_memset(extra, '\0', sizeof(extra));
+       _rtw_memset(extra, '\0', sizeof(*extra));
 
        if (stop == 0) {
                bStartTest = 0; /*  To set Stop */
index d3078d200e505f4c4f24237eb202d80ae6e9cd69..9ca3180ebaa0e70fbb188c84748971fd4f34922d 100644 (file)
@@ -54,6 +54,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
        /*=== Customer ID ===*/
        /****** 8188EUS ********/
        {USB_DEVICE(0x8179, 0x07B8)}, /* Abocom - Abocom */
+       {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
        {}      /* Terminating entry */
 };
 
index 5bc361b16d4ca05e924bd3a326824c852a67b8f0..56144014b7c9ba15126f97b073fb6eea4777937a 100644 (file)
@@ -37,6 +37,8 @@ rt_status SendTxCommandPacket(struct net_device *dev, void *pData, u32 DataLen)
        /* Get TCB and local buffer from common pool.
           (It is shared by CmdQ, MgntQ, and USB coalesce DataQ) */
        skb  = dev_alloc_skb(USB_HWDESC_HEADER_LEN + DataLen + 4);
+       if (!skb)
+               return RT_STATUS_FAILURE;
        memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
        tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
        tcb_desc->queue_index = TXCMD_QUEUE;
index dbf11ecb794ec60dd576578f9f5172d0f4448f10..19d3cf451b880c736c82cce806fdfb14d98b6379 100644 (file)
@@ -172,8 +172,8 @@ static u16 swGetOFDMControlRate(struct vnt_private *pDevice, u16 wRateIdx)
        if (!CARDbIsOFDMinBasicRate(pDevice)) {
                DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
                        "swGetOFDMControlRate:(NO OFDM) %d\n", wRateIdx);
-       if (wRateIdx > RATE_24M)
-               wRateIdx = RATE_24M;
+               if (wRateIdx > RATE_24M)
+                       wRateIdx = RATE_24M;
                return wRateIdx;
        }
 
index d0cf7d8a20e5640fe507f3ff1b1dc64c170f9408..8872e0f84f40e825efec5dd7f7614f6c22f43562 100644 (file)
@@ -1634,6 +1634,9 @@ int iwctl_siwencodeext(struct net_device *dev, struct iw_request_info *info,
        if (pMgmt == NULL)
                return -EFAULT;
 
+       if (!(pDevice->flags & DEVICE_FLAGS_OPENED))
+               return -ENODEV;
+
        buf = kzalloc(sizeof(struct viawget_wpa_param), GFP_KERNEL);
        if (buf == NULL)
                return -ENOMEM;
index 536971786ae8e4cb87c27b4705d006df0d5cbee6..6f9d28182445ce99b2d37bcd37aaae3c3d2bdc03 100644 (file)
@@ -1098,6 +1098,8 @@ static int device_close(struct net_device *dev)
     memset(pMgmt->abyCurrBSSID, 0, 6);
     pMgmt->eCurrState = WMAC_STATE_IDLE;
 
+       pDevice->flags &= ~DEVICE_FLAGS_OPENED;
+
     device_free_tx_bufs(pDevice);
     device_free_rx_bufs(pDevice);
     device_free_int_bufs(pDevice);
@@ -1109,7 +1111,6 @@ static int device_close(struct net_device *dev)
     usb_free_urb(pDevice->pInterruptURB);
 
     BSSvClearNodeDBTable(pDevice, 0);
-    pDevice->flags &=(~DEVICE_FLAGS_OPENED);
 
     DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_close2 \n");
 
index fb743a8811bbc9c0c8c37d23bde37a96b863d735..14f3e852215da5fb27702b4b91687047b9a290c2 100644 (file)
@@ -148,6 +148,8 @@ static void *s_vGetFreeContext(struct vnt_private *pDevice)
     DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"GetFreeContext()\n");
 
     for (ii = 0; ii < pDevice->cbTD; ii++) {
+       if (!pDevice->apTD[ii])
+               return NULL;
         pContext = pDevice->apTD[ii];
         if (pContext->bBoolInUse == false) {
             pContext->bBoolInUse = true;
index efc56987a60b53a0a409277da639a95b128f5304..7db6f03a00540cd8c2a131ab7039fec6251b72cc 100644 (file)
@@ -2054,7 +2054,7 @@ static int xillybus_init_chrdev(struct xilly_endpoint *endpoint,
                                       NULL,
                                       MKDEV(major, i),
                                       NULL,
-                                      devname);
+                                      "%s", devname);
 
                if (IS_ERR(device)) {
                        pr_warn("xillybus: Failed to create %s "
index 91d94b564433ad2aa3f8b6b5091ca26d03459477..2c4ed52ca849d45b385cbeed0d88a545d4a5fa5d 100644 (file)
@@ -981,4 +981,3 @@ MODULE_PARM_DESC(num_devices, "Number of zram devices");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
 MODULE_DESCRIPTION("Compressed RAM Block Device");
-MODULE_ALIAS("devname:zram");
index c9a9ddd1d0bc2aa5091d7e1678920df2d9292429..01bf5eb4f2384a66999157ddd855162c8e966f7a 100644 (file)
@@ -1758,8 +1758,7 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
                canon_change = (old->c_lflag ^ tty->termios.c_lflag) & ICANON;
        if (canon_change) {
                bitmap_zero(ldata->read_flags, N_TTY_BUF_SIZE);
-               ldata->line_start = 0;
-               ldata->canon_head = ldata->read_tail;
+               ldata->line_start = ldata->canon_head = ldata->read_tail;
                ldata->erasing = 0;
                ldata->lnext = 0;
        }
index 52379e56a31e7abc43a6642a799b1be7744be6c8..44077c0b7670075625650c9d00cc98a63bb90f5c 100644 (file)
@@ -667,30 +667,21 @@ static int pop_tx_x(struct eg20t_port *priv, unsigned char *buf)
 
 static int dma_push_rx(struct eg20t_port *priv, int size)
 {
-       struct tty_struct *tty;
        int room;
        struct uart_port *port = &priv->port;
        struct tty_port *tport = &port->state->port;
 
-       port = &priv->port;
-       tty = tty_port_tty_get(tport);
-       if (!tty) {
-               dev_dbg(priv->port.dev, "%s:tty is busy now", __func__);
-               return 0;
-       }
-
        room = tty_buffer_request_room(tport, size);
 
        if (room < size)
                dev_warn(port->dev, "Rx overrun: dropping %u bytes\n",
                         size - room);
        if (!room)
-               return room;
+               return 0;
 
        tty_insert_flip_string(tport, sg_virt(&priv->sg_rx), size);
 
        port->icount.rx += room;
-       tty_kref_put(tty);
 
        return room;
 }
@@ -1098,6 +1089,8 @@ static void pch_uart_err_ir(struct eg20t_port *priv, unsigned int lsr)
        if (tty == NULL) {
                for (i = 0; error_msg[i] != NULL; i++)
                        dev_err(&priv->pdev->dev, error_msg[i]);
+       } else {
+               tty_kref_put(tty);
        }
 }
 
index d0d972f7e43e3759fb03119786a1103764c86730..0489a2bdcdf9a1aff4af5dacd1d7766f9671a523 100644 (file)
@@ -732,7 +732,7 @@ static irqreturn_t tegra_uart_isr(int irq, void *data)
 static void tegra_uart_stop_rx(struct uart_port *u)
 {
        struct tegra_uart_port *tup = to_tegra_uport(u);
-       struct tty_struct *tty = tty_port_tty_get(&tup->uport.state->port);
+       struct tty_struct *tty;
        struct tty_port *port = &u->state->port;
        struct dma_tx_state state;
        unsigned long ier;
@@ -744,6 +744,8 @@ static void tegra_uart_stop_rx(struct uart_port *u)
        if (!tup->rx_in_progress)
                return;
 
+       tty = tty_port_tty_get(&tup->uport.state->port);
+
        tegra_uart_wait_sym_time(tup, 1); /* wait a character interval */
 
        ier = tup->ier_shadow;
index 03ba081c577251a0947466b79f23b36b359b51c1..6fd60fece6b4b01684695b7df4af376e695ff873 100644 (file)
@@ -1201,6 +1201,9 @@ int n_tty_ioctl_helper(struct tty_struct *tty, struct file *file,
                }
                return 0;
        case TCFLSH:
+               retval = tty_check_change(tty);
+               if (retval)
+                       return retval;
                return __tty_perform_flush(tty, arg);
        default:
                /* Try the mode commands */
index 4a851e15e58cdfb6832fbc14c93e3e96cb6f9515..77b47d82c9a69d05d7985068fb956f961b6481f1 100644 (file)
@@ -1,6 +1,6 @@
 config USB_CHIPIDEA
        tristate "ChipIdea Highspeed Dual Role Controller"
-       depends on (USB_EHCI_HCD && USB_GADGET) || (USB_EHCI_HCD && !USB_GADGET) || (!USB_EHCI_HCD && USB_GADGET)
+       depends on ((USB_EHCI_HCD && USB_GADGET) || (USB_EHCI_HCD && !USB_GADGET) || (!USB_EHCI_HCD && USB_GADGET)) && HAS_DMA
        help
          Say Y here if your system has a dual role high speed USB
          controller based on ChipIdea silicon IP. Currently, only the
index 74d998d9b45b28001e2f15f6026640ecb6e07085..be822a2c1776cc30c2df5f3ed7840aff68ef10ac 100644 (file)
@@ -131,7 +131,7 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
                if (ret) {
                        dev_err(&pdev->dev, "usbmisc init failed, ret=%d\n",
                                        ret);
-                       goto err_clk;
+                       goto err_phy;
                }
        }
 
@@ -143,7 +143,7 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
                dev_err(&pdev->dev,
                        "Can't register ci_hdrc platform device, err=%d\n",
                        ret);
-               goto err_clk;
+               goto err_phy;
        }
 
        if (data->usbmisc_data) {
@@ -164,6 +164,9 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
 
 disable_device:
        ci_hdrc_remove_device(data->ci_pdev);
+err_phy:
+       if (data->phy)
+               usb_phy_shutdown(data->phy);
 err_clk:
        clk_disable_unprepare(data->clk);
        return ret;
index 94626409559a96a79c0d027023dbade8a645ce58..23763dcec069b2e8d1ba6dd140fc9cf644d2ddec 100644 (file)
@@ -605,6 +605,7 @@ static int ci_hdrc_remove(struct platform_device *pdev)
        dbg_remove_files(ci);
        free_irq(ci->irq, ci);
        ci_role_destroy(ci);
+       kfree(ci->hw_bank.regmap);
 
        return 0;
 }
index 6b4c2f2eb94649c7da6fc577b54598d53ebdfec4..9333083dd1111c047c7016a44402242e2aa87f7d 100644 (file)
@@ -1600,6 +1600,8 @@ static void destroy_eps(struct ci_hdrc *ci)
        for (i = 0; i < ci->hw_ep_max; i++) {
                struct ci_hw_ep *hwep = &ci->ci_hw_ep[i];
 
+               if (hwep->pending_td)
+                       free_pending_td(hwep);
                dma_pool_free(ci->qh_pool, hwep->qh.ptr, hwep->qh.dma);
        }
 }
@@ -1667,13 +1669,13 @@ static int ci_udc_stop(struct usb_gadget *gadget,
                if (ci->platdata->notify_event)
                        ci->platdata->notify_event(ci,
                        CI_HDRC_CONTROLLER_STOPPED_EVENT);
-               ci->driver = NULL;
                spin_unlock_irqrestore(&ci->lock, flags);
                _gadget_stop_activity(&ci->gadget);
                spin_lock_irqsave(&ci->lock, flags);
                pm_runtime_put(&ci->gadget.dev);
        }
 
+       ci->driver = NULL;
        spin_unlock_irqrestore(&ci->lock, flags);
 
        return 0;
index 737e3c19967bee3e81f57cc041ec9989ec38fb57..71dc5d768fa5cef3edc5ac27c84c032d2a0a0dcf 100644 (file)
@@ -742,6 +742,22 @@ static int check_ctrlrecip(struct dev_state *ps, unsigned int requesttype,
                if ((index & ~USB_DIR_IN) == 0)
                        return 0;
                ret = findintfep(ps->dev, index);
+               if (ret < 0) {
+                       /*
+                        * Some not fully compliant Win apps seem to get
+                        * index wrong and have the endpoint number here
+                        * rather than the endpoint address (with the
+                        * correct direction). Win does let this through,
+                        * so we'll not reject it here but leave it to
+                        * the device to not break KVM. But we warn.
+                        */
+                       ret = findintfep(ps->dev, index ^ 0x80);
+                       if (ret >= 0)
+                               dev_info(&ps->dev->dev,
+                                       "%s: process %i (%s) requesting ep %02x but needs %02x\n",
+                                       __func__, task_pid_nr(current),
+                                       current->comm, index, index ^ 0x80);
+               }
                if (ret >= 0)
                        ret = checkintf(ps, ret);
                break;
index dde4c83516a1870bd4ed7a11e3fe433ced0979af..e6b682c6c236b8152561496b2e00bba22e75e25c 100644 (file)
@@ -3426,6 +3426,9 @@ static int usb_req_set_sel(struct usb_device *udev, enum usb3_link_state state)
        unsigned long long u2_pel;
        int ret;
 
+       if (udev->state != USB_STATE_CONFIGURED)
+               return 0;
+
        /* Convert SEL and PEL stored in ns to us */
        u1_sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
        u1_pel = DIV_ROUND_UP(udev->u1_params.pel, 1000);
index b870872e020f1e0bd8e93c157b76d145bc2b2122..70fc43027a5c65b6f29f22dc9ec64c18db220323 100644 (file)
@@ -1,7 +1,6 @@
 config USB_DWC3
        tristate "DesignWare USB3 DRD Core Support"
        depends on (USB || USB_GADGET) && HAS_DMA
-       depends on EXTCON
        select USB_XHCI_PLATFORM if USB_SUPPORT && USB_XHCI_HCD
        help
          Say Y or M here if your system has a Dual Role SuperSpeed
index 9b138129e856aa22b27c4a83398adf55c8949247..2e252aae51ca0bcc5da41b3251c45cec91cced7d 100644 (file)
@@ -28,6 +28,8 @@
 /* FIXME define these in <linux/pci_ids.h> */
 #define PCI_VENDOR_ID_SYNOPSYS         0x16c3
 #define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3        0xabcd
+#define PCI_DEVICE_ID_INTEL_BYT                0x0f37
+#define PCI_DEVICE_ID_INTEL_MRFLD      0x119e
 
 struct dwc3_pci {
        struct device           *dev;
@@ -187,6 +189,8 @@ static DEFINE_PCI_DEVICE_TABLE(dwc3_pci_id_table) = {
                PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS,
                                PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3),
        },
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BYT), },
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MRFLD), },
        {  }    /* Terminating Entry */
 };
 MODULE_DEVICE_TABLE(pci, dwc3_pci_id_table);
index f168eaebdef8fc6731de7c6994c658737495fdce..5452c0fce36074d4238e3553bb00d879d8df9ac3 100644 (file)
@@ -2611,15 +2611,13 @@ int dwc3_gadget_init(struct dwc3 *dwc)
        ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
        if (ret) {
                dev_err(dwc->dev, "failed to register udc\n");
-               goto err5;
+               goto err4;
        }
 
        return 0;
 
-err5:
-       dwc3_gadget_free_endpoints(dwc);
-
 err4:
+       dwc3_gadget_free_endpoints(dwc);
        dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
                        dwc->ep0_bounce, dwc->ep0_bounce_addr);
 
index 5a5acf22c694514f5b3ebede7903f9912e4138b5..e126b6b248e63cd1b412bea2561e90bd15a104d3 100644 (file)
@@ -113,12 +113,6 @@ static int __init cdc_do_config(struct usb_configuration *c)
                c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
        }
 
-       fi_ecm = usb_get_function_instance("ecm");
-       if (IS_ERR(fi_ecm)) {
-               status = PTR_ERR(fi_ecm);
-               goto err_func_ecm;
-       }
-
        f_ecm = usb_get_function(fi_ecm);
        if (IS_ERR(f_ecm)) {
                status = PTR_ERR(f_ecm);
@@ -129,35 +123,24 @@ static int __init cdc_do_config(struct usb_configuration *c)
        if (status)
                goto err_add_ecm;
 
-       fi_serial = usb_get_function_instance("acm");
-       if (IS_ERR(fi_serial)) {
-               status = PTR_ERR(fi_serial);
-               goto err_get_acm;
-       }
-
        f_acm = usb_get_function(fi_serial);
        if (IS_ERR(f_acm)) {
                status = PTR_ERR(f_acm);
-               goto err_func_acm;
+               goto err_get_acm;
        }
 
        status = usb_add_function(c, f_acm);
        if (status)
                goto err_add_acm;
-
        return 0;
 
 err_add_acm:
        usb_put_function(f_acm);
-err_func_acm:
-       usb_put_function_instance(fi_serial);
 err_get_acm:
        usb_remove_function(c, f_ecm);
 err_add_ecm:
        usb_put_function(f_ecm);
 err_get_ecm:
-       usb_put_function_instance(fi_ecm);
-err_func_ecm:
        return status;
 }
 
index 06ecd08fd57a3174022a3579f458c84f30440123..b8a2376971a47aa357794117f74390aaee01a594 100644 (file)
@@ -923,8 +923,9 @@ static int dummy_udc_stop(struct usb_gadget *g,
        struct dummy_hcd        *dum_hcd = gadget_to_dummy_hcd(g);
        struct dummy            *dum = dum_hcd->dum;
 
-       dev_dbg(udc_dev(dum), "unregister gadget driver '%s'\n",
-                       driver->driver.name);
+       if (driver)
+               dev_dbg(udc_dev(dum), "unregister gadget driver '%s'\n",
+                               driver->driver.name);
 
        dum->driver = NULL;
 
@@ -1000,8 +1001,8 @@ static int dummy_udc_remove(struct platform_device *pdev)
 {
        struct dummy    *dum = platform_get_drvdata(pdev);
 
-       usb_del_gadget_udc(&dum->gadget);
        device_remove_file(&dum->gadget.dev, &dev_attr_function);
+       usb_del_gadget_udc(&dum->gadget);
        return 0;
 }
 
index edab45da37417e16960b75033a46a8453475b77b..8d9e6f7e8f1a5a6c36965dfdd4c5a213f4f917aa 100644 (file)
@@ -995,7 +995,7 @@ static void ecm_unbind(struct usb_configuration *c, struct usb_function *f)
        usb_ep_free_request(ecm->notify, ecm->notify_req);
 }
 
-struct usb_function *ecm_alloc(struct usb_function_instance *fi)
+static struct usb_function *ecm_alloc(struct usb_function_instance *fi)
 {
        struct f_ecm    *ecm;
        struct f_ecm_opts *opts;
index d00392d879db3aa3e75d093d4e26041fa42bcb54..d61c11d765d0a92b9e9e79270e9de4b3cb474ca3 100644 (file)
@@ -624,7 +624,7 @@ static void eem_unbind(struct usb_configuration *c, struct usb_function *f)
        usb_free_all_descriptors(f);
 }
 
-struct usb_function *eem_alloc(struct usb_function_instance *fi)
+static struct usb_function *eem_alloc(struct usb_function_instance *fi)
 {
        struct f_eem    *eem;
        struct f_eem_opts *opts;
index 1a66c5baa0d1f292188a1756cf17d5243b2ffe58..0658908d8968d3f030d156a3c263e885f11e3477 100644 (file)
@@ -1034,37 +1034,19 @@ struct ffs_sb_fill_data {
        struct ffs_file_perms perms;
        umode_t root_mode;
        const char *dev_name;
-       union {
-               /* set by ffs_fs_mount(), read by ffs_sb_fill() */
-               void *private_data;
-               /* set by ffs_sb_fill(), read by ffs_fs_mount */
-               struct ffs_data *ffs_data;
-       };
+       struct ffs_data *ffs_data;
 };
 
 static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
 {
        struct ffs_sb_fill_data *data = _data;
        struct inode    *inode;
-       struct ffs_data *ffs;
+       struct ffs_data *ffs = data->ffs_data;
 
        ENTER();
 
-       /* Initialise data */
-       ffs = ffs_data_new();
-       if (unlikely(!ffs))
-               goto Enomem;
-
        ffs->sb              = sb;
-       ffs->dev_name        = kstrdup(data->dev_name, GFP_KERNEL);
-       if (unlikely(!ffs->dev_name))
-               goto Enomem;
-       ffs->file_perms      = data->perms;
-       ffs->private_data    = data->private_data;
-
-       /* used by the caller of this function */
-       data->ffs_data       = ffs;
-
+       data->ffs_data       = NULL;
        sb->s_fs_info        = ffs;
        sb->s_blocksize      = PAGE_CACHE_SIZE;
        sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
@@ -1080,17 +1062,14 @@ static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
                                  &data->perms);
        sb->s_root = d_make_root(inode);
        if (unlikely(!sb->s_root))
-               goto Enomem;
+               return -ENOMEM;
 
        /* EP0 file */
        if (unlikely(!ffs_sb_create_file(sb, "ep0", ffs,
                                         &ffs_ep0_operations, NULL)))
-               goto Enomem;
+               return -ENOMEM;
 
        return 0;
-
-Enomem:
-       return -ENOMEM;
 }
 
 static int ffs_fs_parse_opts(struct ffs_sb_fill_data *data, char *opts)
@@ -1193,6 +1172,7 @@ ffs_fs_mount(struct file_system_type *t, int flags,
        struct dentry *rv;
        int ret;
        void *ffs_dev;
+       struct ffs_data *ffs;
 
        ENTER();
 
@@ -1200,18 +1180,30 @@ ffs_fs_mount(struct file_system_type *t, int flags,
        if (unlikely(ret < 0))
                return ERR_PTR(ret);
 
+       ffs = ffs_data_new();
+       if (unlikely(!ffs))
+               return ERR_PTR(-ENOMEM);
+       ffs->file_perms = data.perms;
+
+       ffs->dev_name = kstrdup(dev_name, GFP_KERNEL);
+       if (unlikely(!ffs->dev_name)) {
+               ffs_data_put(ffs);
+               return ERR_PTR(-ENOMEM);
+       }
+
        ffs_dev = functionfs_acquire_dev_callback(dev_name);
-       if (IS_ERR(ffs_dev))
-               return ffs_dev;
+       if (IS_ERR(ffs_dev)) {
+               ffs_data_put(ffs);
+               return ERR_CAST(ffs_dev);
+       }
+       ffs->private_data = ffs_dev;
+       data.ffs_data = ffs;
 
-       data.dev_name = dev_name;
-       data.private_data = ffs_dev;
        rv = mount_nodev(t, flags, &data, ffs_sb_fill);
-
-       /* data.ffs_data is set by ffs_sb_fill */
-       if (IS_ERR(rv))
+       if (IS_ERR(rv) && data.ffs_data) {
                functionfs_release_dev_callback(data.ffs_data);
-
+               ffs_data_put(data.ffs_data);
+       }
        return rv;
 }
 
index 313b835eedfd58948d23a96a85ade673347e8114..a01d7d38c01685135a503885342be7f9ec63ebbd 100644 (file)
@@ -2260,10 +2260,12 @@ reset:
                /* Disable the endpoints */
                if (fsg->bulk_in_enabled) {
                        usb_ep_disable(fsg->bulk_in);
+                       fsg->bulk_in->driver_data = NULL;
                        fsg->bulk_in_enabled = 0;
                }
                if (fsg->bulk_out_enabled) {
                        usb_ep_disable(fsg->bulk_out);
+                       fsg->bulk_out->driver_data = NULL;
                        fsg->bulk_out_enabled = 0;
                }
 
index 32db2eee2d8738fe5388f8c00bd2c644ac2b5b20..bbbfd19487784c6ebbfb107a24a7233c45740a45 100644 (file)
@@ -1214,6 +1214,6 @@ static struct platform_driver fotg210_driver = {
 
 module_platform_driver(fotg210_driver);
 
-MODULE_AUTHOR("Yuan-Hsin Chen <yhchen@faraday-tech.com>");
+MODULE_AUTHOR("Yuan-Hsin Chen, Feng-Hsin Chiang <john453@faraday-tech.com>");
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION(DRIVER_DESC);
index f1dd6daabe217c0742140c1619c3be5aa22c8c54..b278abe524539640f14a29f70a1f3a9d25fa1747 100644 (file)
@@ -22,7 +22,7 @@
 
 MODULE_DESCRIPTION("FUSB300  USB gadget driver");
 MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Yuan Hsin Chen <yhchen@faraday-tech.com>");
+MODULE_AUTHOR("Yuan-Hsin Chen, Feng-Hsin Chiang <john453@faraday-tech.com>");
 MODULE_ALIAS("platform:fusb300_udc");
 
 #define DRIVER_VERSION "20 October 2010"
index 2a1ebefd8f9eb31318fc861fbc010214148db18d..23393254a8a35593526062cc09880f50c809dced 100644 (file)
@@ -179,7 +179,7 @@ err_conf:
        return ret;
 }
 
-static int rndis_config_register(struct usb_composite_dev *cdev)
+static __ref int rndis_config_register(struct usb_composite_dev *cdev)
 {
        static struct usb_configuration config = {
                .bConfigurationValue    = MULTI_RNDIS_CONFIG_NUM,
@@ -194,7 +194,7 @@ static int rndis_config_register(struct usb_composite_dev *cdev)
 
 #else
 
-static int rndis_config_register(struct usb_composite_dev *cdev)
+static __ref int rndis_config_register(struct usb_composite_dev *cdev)
 {
        return 0;
 }
@@ -241,7 +241,7 @@ err_conf:
        return ret;
 }
 
-static int cdc_config_register(struct usb_composite_dev *cdev)
+static __ref int cdc_config_register(struct usb_composite_dev *cdev)
 {
        static struct usb_configuration config = {
                .bConfigurationValue    = MULTI_CDC_CONFIG_NUM,
@@ -256,7 +256,7 @@ static int cdc_config_register(struct usb_composite_dev *cdev)
 
 #else
 
-static int cdc_config_register(struct usb_composite_dev *cdev)
+static __ref int cdc_config_register(struct usb_composite_dev *cdev)
 {
        return 0;
 }
index bbb6e98c4384292d3446b62f8671decaf0a51c55..561b30efb8ee84de6fc37c33005cdadc6fbcc5ff 100644 (file)
@@ -645,6 +645,7 @@ static int  mv_u3d_ep_disable(struct usb_ep *_ep)
        struct mv_u3d_ep *ep;
        struct mv_u3d_ep_context *ep_context;
        u32 epxcr, direction;
+       unsigned long flags;
 
        if (!_ep)
                return -EINVAL;
@@ -661,7 +662,9 @@ static int  mv_u3d_ep_disable(struct usb_ep *_ep)
        direction = mv_u3d_ep_dir(ep);
 
        /* nuke all pending requests (does flush) */
+       spin_lock_irqsave(&u3d->lock, flags);
        mv_u3d_nuke(ep, -ESHUTDOWN);
+       spin_unlock_irqrestore(&u3d->lock, flags);
 
        /* Disable the endpoint for Rx or Tx and reset the endpoint type */
        if (direction == MV_U3D_EP_DIR_OUT) {
index d69b36a99dbcd8ae5a1bc57275398be86a2562ca..6bddf1aa23479f388ac101355d500a858d8b02a4 100644 (file)
@@ -2475,8 +2475,6 @@ irq_retry:
        if (gintsts & GINTSTS_ErlySusp) {
                dev_dbg(hsotg->dev, "GINTSTS_ErlySusp\n");
                writel(GINTSTS_ErlySusp, hsotg->regs + GINTSTS);
-
-               s3c_hsotg_disconnect(hsotg);
        }
 
        /*
@@ -2962,9 +2960,6 @@ static int s3c_hsotg_udc_stop(struct usb_gadget *gadget,
        if (!hsotg)
                return -ENODEV;
 
-       if (!driver || driver != hsotg->driver || !driver->unbind)
-               return -EINVAL;
-
        /* all endpoints should be shutdown */
        for (ep = 0; ep < hsotg->num_of_eps; ep++)
                s3c_hsotg_ep_disable(&hsotg->eps[ep].ep);
@@ -2972,15 +2967,15 @@ static int s3c_hsotg_udc_stop(struct usb_gadget *gadget,
        spin_lock_irqsave(&hsotg->lock, flags);
 
        s3c_hsotg_phy_disable(hsotg);
-       regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies), hsotg->supplies);
 
-       hsotg->driver = NULL;
+       if (!driver)
+               hsotg->driver = NULL;
+
        hsotg->gadget.speed = USB_SPEED_UNKNOWN;
 
        spin_unlock_irqrestore(&hsotg->lock, flags);
 
-       dev_info(hsotg->dev, "unregistered gadget driver '%s'\n",
-                driver->driver.name);
+       regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies), hsotg->supplies);
 
        return 0;
 }
index 947b009009f111ba710954ea639962b4d4008463..f2407b2e8a996210aec7f3e7a442119fd6928924 100644 (file)
@@ -130,7 +130,7 @@ static int usb_hcd_fsl_probe(const struct hc_driver *driver,
        }
 
        /* Enable USB controller, 83xx or 8536 */
-       if (pdata->have_sysif_regs)
+       if (pdata->have_sysif_regs && pdata->controller_ver < FSL_USB_VER_1_6)
                setbits32(hcd->regs + FSL_SOC_USB_CTRL, 0x4);
 
        /* Don't need to set host mode here. It will be done by tdi_reset() */
@@ -232,15 +232,9 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
        case FSL_USB2_PHY_ULPI:
                if (pdata->have_sysif_regs && pdata->controller_ver) {
                        /* controller version 1.6 or above */
+                       clrbits32(non_ehci + FSL_SOC_USB_CTRL, UTMI_PHY_EN);
                        setbits32(non_ehci + FSL_SOC_USB_CTRL,
-                                       ULPI_PHY_CLK_SEL);
-                       /*
-                        * Due to controller issue of PHY_CLK_VALID in ULPI
-                        * mode, we set USB_CTRL_USB_EN before checking
-                        * PHY_CLK_VALID, otherwise PHY_CLK_VALID doesn't work.
-                        */
-                       clrsetbits_be32(non_ehci + FSL_SOC_USB_CTRL,
-                                       UTMI_PHY_EN, USB_CTRL_USB_EN);
+                               ULPI_PHY_CLK_SEL | USB_CTRL_USB_EN);
                }
                portsc |= PORT_PTS_ULPI;
                break;
@@ -270,8 +264,9 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
        if (pdata->have_sysif_regs && pdata->controller_ver &&
            (phy_mode == FSL_USB2_PHY_ULPI)) {
                /* check PHY_CLK_VALID to get phy clk valid */
-               if (!spin_event_timeout(in_be32(non_ehci + FSL_SOC_USB_CTRL) &
-                               PHY_CLK_VALID, FSL_USB_PHY_CLK_TIMEOUT, 0)) {
+               if (!(spin_event_timeout(in_be32(non_ehci + FSL_SOC_USB_CTRL) &
+                               PHY_CLK_VALID, FSL_USB_PHY_CLK_TIMEOUT, 0) ||
+                               in_be32(non_ehci + FSL_SOC_USB_PRICTRL))) {
                        printk(KERN_WARNING "fsl-ehci: USB PHY clock invalid\n");
                        return -EINVAL;
                }
@@ -669,7 +664,7 @@ static const struct hc_driver ehci_fsl_hc_driver = {
         * generic hardware linkage
         */
        .irq = ehci_irq,
-       .flags = HCD_USB2 | HCD_MEMORY | HCD_BH,
+       .flags = HCD_USB2 | HCD_MEMORY,
 
        /*
         * basic lifecycle operations
index b52a66ce92e8592b123239aa24b724dddfd085fa..83ab51af250f158e735373760f8c008ce6fe0ad8 100644 (file)
@@ -43,7 +43,7 @@ static const struct hc_driver ehci_grlib_hc_driver = {
         * generic hardware linkage
         */
        .irq                    = ehci_irq,
-       .flags                  = HCD_MEMORY | HCD_USB2 | HCD_BH,
+       .flags                  = HCD_MEMORY | HCD_USB2,
 
        /*
         * basic lifecycle operations
index 5d6022f30ebe94b2228c6b38851217af1ee8f477..86ab9fd9fe9e938fc29cadeac72a935e8c26b8ff 100644 (file)
@@ -1158,7 +1158,7 @@ static const struct hc_driver ehci_hc_driver = {
         * generic hardware linkage
         */
        .irq =                  ehci_irq,
-       .flags =                HCD_MEMORY | HCD_USB2 | HCD_BH,
+       .flags =                HCD_MEMORY | HCD_USB2,
 
        /*
         * basic lifecycle operations
index 417c10da945078e37ddf20e7be290e996936074e..35cdbd88bbbef62a93a3aa869c12dbfda8183bf2 100644 (file)
@@ -96,7 +96,7 @@ static const struct hc_driver mv_ehci_hc_driver = {
         * generic hardware linkage
         */
        .irq = ehci_irq,
-       .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
+       .flags = HCD_MEMORY | HCD_USB2,
 
        /*
         * basic lifecycle operations
index ab0397e4d8f3eadae916d07434f4431f3d59def3..45cc00158412ac8a380cda88a28a4bb7536d62fa 100644 (file)
@@ -51,7 +51,7 @@ static const struct hc_driver ehci_octeon_hc_driver = {
         * generic hardware linkage
         */
        .irq                    = ehci_irq,
-       .flags                  = HCD_MEMORY | HCD_USB2 | HCD_BH,
+       .flags                  = HCD_MEMORY | HCD_USB2,
 
        /*
         * basic lifecycle operations
index 6bd299e61f58d23202183f3186c9542f99e93a23..854c2ec7b699d4effe2c9ca6ae4ab264e1a73cd8 100644 (file)
@@ -361,7 +361,7 @@ static struct pci_driver ehci_pci_driver = {
        .remove =       usb_hcd_pci_remove,
        .shutdown =     usb_hcd_pci_shutdown,
 
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_PM
        .driver =       {
                .pm =   &usb_hcd_pci_pm_ops
        },
index 893b707f0000abf0e323f39b28b6060a3293bcef..601e208bd782c07e9d0bb1b60d238ccbb7774758 100644 (file)
@@ -286,7 +286,7 @@ static const struct hc_driver ehci_msp_hc_driver = {
 #else
        .irq =                  ehci_irq,
 #endif
-       .flags =                HCD_MEMORY | HCD_USB2 | HCD_BH,
+       .flags =                HCD_MEMORY | HCD_USB2,
 
        /*
         * basic lifecycle operations
index 6cc5567bf9c87faaa8c4a21dcb4202e8e6e7fb94..932293fa32de657de2e36ba091127e50009a6ff7 100644 (file)
@@ -28,7 +28,7 @@ static const struct hc_driver ehci_ppc_of_hc_driver = {
         * generic hardware linkage
         */
        .irq                    = ehci_irq,
-       .flags                  = HCD_MEMORY | HCD_USB2 | HCD_BH,
+       .flags                  = HCD_MEMORY | HCD_USB2,
 
        /*
         * basic lifecycle operations
index 8188542ba17ea01214a3ab0f269fe07cb6cb1744..fd983771b02559cb56c6210e6813d9b223a80f7a 100644 (file)
@@ -71,7 +71,7 @@ static const struct hc_driver ps3_ehci_hc_driver = {
        .product_desc           = "PS3 EHCI Host Controller",
        .hcd_priv_size          = sizeof(struct ehci_hcd),
        .irq                    = ehci_irq,
-       .flags                  = HCD_MEMORY | HCD_USB2 | HCD_BH,
+       .flags                  = HCD_MEMORY | HCD_USB2,
        .reset                  = ps3_ehci_hc_reset,
        .start                  = ehci_run,
        .stop                   = ehci_stop,
index e321804c34755553afcf3c1ceab38678ed27af1b..a7f776a13eb17133459f23ac584b60287e544197 100644 (file)
@@ -247,6 +247,8 @@ static int qtd_copy_status (
 
 static void
 ehci_urb_done(struct ehci_hcd *ehci, struct urb *urb, int status)
+__releases(ehci->lock)
+__acquires(ehci->lock)
 {
        if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT) {
                /* ... update hc-wide periodic stats */
@@ -272,8 +274,11 @@ ehci_urb_done(struct ehci_hcd *ehci, struct urb *urb, int status)
                urb->actual_length, urb->transfer_buffer_length);
 #endif
 
+       /* complete() can reenter this HCD */
        usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
+       spin_unlock (&ehci->lock);
        usb_hcd_giveback_urb(ehci_to_hcd(ehci), urb, status);
+       spin_lock (&ehci->lock);
 }
 
 static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh);
index 8a734498079bc176938c57a6b132c146e9be00dd..b2de52d3961488f249aeb9d4026efe2d603b72bb 100644 (file)
@@ -55,7 +55,7 @@ const struct hc_driver ehci_sead3_hc_driver = {
         * generic hardware linkage
         */
        .irq                    = ehci_irq,
-       .flags                  = HCD_MEMORY | HCD_USB2 | HCD_BH,
+       .flags                  = HCD_MEMORY | HCD_USB2,
 
        /*
         * basic lifecycle operations
index dc899eb2b86183561351d78e8dba1ceffc9cbb18..93e59a13bc1fec919ef81690bbce9f4bdcd3e951 100644 (file)
@@ -36,7 +36,7 @@ static const struct hc_driver ehci_sh_hc_driver = {
         * generic hardware linkage
         */
        .irq                            = ehci_irq,
-       .flags                          = HCD_USB2 | HCD_MEMORY | HCD_BH,
+       .flags                          = HCD_USB2 | HCD_MEMORY,
 
        /*
         * basic lifecycle operations
index 67026ffbf9a871c9780a4b0b7f0d6c739c904e73..cca4be90a864dba009c852f606653fb5fe60d568 100644 (file)
@@ -61,7 +61,7 @@ static const struct hc_driver ehci_tilegx_hc_driver = {
         * Generic hardware linkage.
         */
        .irq                    = ehci_irq,
-       .flags                  = HCD_MEMORY | HCD_USB2 | HCD_BH,
+       .flags                  = HCD_MEMORY | HCD_USB2,
 
        /*
         * Basic lifecycle operations.
index 1c370dfbee0d35e6a3cf2b1288836df2116599c5..59e0e24c753febfb76369be8f872cb3731c2f365 100644 (file)
@@ -108,7 +108,7 @@ static const struct hc_driver ehci_w90x900_hc_driver = {
         * generic hardware linkage
         */
        .irq = ehci_irq,
-       .flags = HCD_USB2|HCD_MEMORY|HCD_BH,
+       .flags = HCD_USB2|HCD_MEMORY,
 
        /*
         * basic lifecycle operations
index 95979f9f4381d8e8e573c7e0fe254d5585d23ab8..eba962e6ebfbbd8ffdd46a85e0fed5ec9b18b78a 100644 (file)
@@ -79,7 +79,7 @@ static const struct hc_driver ehci_xilinx_of_hc_driver = {
         * generic hardware linkage
         */
        .irq                    = ehci_irq,
-       .flags                  = HCD_MEMORY | HCD_USB2 | HCD_BH,
+       .flags                  = HCD_MEMORY | HCD_USB2,
 
        /*
         * basic lifecycle operations
index 9e0020d9e4c8cc01353ef4b804a9ba9d50bc8cfd..abd5050a4899bdb4518294175477a418583c4e83 100644 (file)
@@ -24,7 +24,7 @@ struct fsl_usb2_dev_data {
        enum fsl_usb2_operating_modes op_mode;  /* operating mode */
 };
 
-struct fsl_usb2_dev_data dr_mode_data[] = {
+static struct fsl_usb2_dev_data dr_mode_data[] = {
        {
                .dr_mode = "host",
                .drivers = { "fsl-ehci", NULL, NULL, },
@@ -42,7 +42,7 @@ struct fsl_usb2_dev_data dr_mode_data[] = {
        },
 };
 
-struct fsl_usb2_dev_data *get_dr_mode_data(struct device_node *np)
+static struct fsl_usb2_dev_data *get_dr_mode_data(struct device_node *np)
 {
        const unsigned char *prop;
        int i;
@@ -75,7 +75,7 @@ static enum fsl_usb2_phy_modes determine_usb_phy(const char *phy_type)
        return FSL_USB2_PHY_NONE;
 }
 
-struct platform_device *fsl_usb2_device_register(
+static struct platform_device *fsl_usb2_device_register(
                                        struct platform_device *ofdev,
                                        struct fsl_usb2_platform_data *pdata,
                                        const char *name, int id)
index 60a5de505ca1845609fe34278405e752ddf73226..adb01d950a165f7cf7d5e1a54fe6b674b4cb533a 100644 (file)
@@ -824,13 +824,13 @@ static int imx21_hc_urb_enqueue_isoc(struct usb_hcd *hcd,
                        i = DIV_ROUND_UP(wrap_frame(
                                        cur_frame - urb->start_frame),
                                        urb->interval);
-                       if (urb->transfer_flags & URB_ISO_ASAP) {
+
+                       /* Treat underruns as if URB_ISO_ASAP was set */
+                       if ((urb->transfer_flags & URB_ISO_ASAP) ||
+                                       i >= urb->number_of_packets) {
                                urb->start_frame = wrap_frame(urb->start_frame
                                                + i * urb->interval);
                                i = 0;
-                       } else if (i >= urb->number_of_packets) {
-                               ret = -EXDEV;
-                               goto alloc_dmem_failed;
                        }
                }
        }
index 8f6b695af6a470c5f5d5a5ca6a278e3a1186dfcc..604cad1bcf9cd984666aaf0b2b797b48ee6e3382 100644 (file)
@@ -216,31 +216,26 @@ static int ohci_urb_enqueue (
                        frame &= ~(ed->interval - 1);
                        frame |= ed->branch;
                        urb->start_frame = frame;
+                       ed->last_iso = frame + ed->interval * (size - 1);
                }
        } else if (ed->type == PIPE_ISOCHRONOUS) {
                u16     next = ohci_frame_no(ohci) + 1;
                u16     frame = ed->last_iso + ed->interval;
+               u16     length = ed->interval * (size - 1);
 
                /* Behind the scheduling threshold? */
                if (unlikely(tick_before(frame, next))) {
 
-                       /* USB_ISO_ASAP: Round up to the first available slot */
+                       /* URB_ISO_ASAP: Round up to the first available slot */
                        if (urb->transfer_flags & URB_ISO_ASAP) {
                                frame += (next - frame + ed->interval - 1) &
                                                -ed->interval;
 
                        /*
-                        * Not ASAP: Use the next slot in the stream.  If
-                        * the entire URB falls before the threshold, fail.
+                        * Not ASAP: Use the next slot in the stream,
+                        * no matter what.
                         */
                        } else {
-                               if (tick_before(frame + ed->interval *
-                                       (urb->number_of_packets - 1), next)) {
-                                       retval = -EXDEV;
-                                       usb_hcd_unlink_urb_from_ep(hcd, urb);
-                                       goto fail;
-                               }
-
                                /*
                                 * Some OHCI hardware doesn't handle late TDs
                                 * correctly.  After retiring them it proceeds
@@ -251,9 +246,16 @@ static int ohci_urb_enqueue (
                                urb_priv->td_cnt = DIV_ROUND_UP(
                                                (u16) (next - frame),
                                                ed->interval);
+                               if (urb_priv->td_cnt >= urb_priv->length) {
+                                       ++urb_priv->td_cnt;     /* Mark it */
+                                       ohci_dbg(ohci, "iso underrun %p (%u+%u < %u)\n",
+                                                       urb, frame, length,
+                                                       next);
+                               }
                        }
                }
                urb->start_frame = frame;
+               ed->last_iso = frame + length;
        }
 
        /* fill the TDs and link them to the ed; and
index df4a6707322d322dc292e4d11bfdf926e34dd3ce..e7f577e636240b25a74f6b5f4b43c9ebdc99e9de 100644 (file)
@@ -41,9 +41,13 @@ finish_urb(struct ohci_hcd *ohci, struct urb *urb, int status)
 __releases(ohci->lock)
 __acquires(ohci->lock)
 {
-        struct device *dev = ohci_to_hcd(ohci)->self.controller;
+       struct device *dev = ohci_to_hcd(ohci)->self.controller;
+       struct usb_host_endpoint *ep = urb->ep;
+       struct urb_priv *urb_priv;
+
        // ASSERT (urb->hcpriv != 0);
 
+ restart:
        urb_free_priv (ohci, urb->hcpriv);
        urb->hcpriv = NULL;
        if (likely(status == -EINPROGRESS))
@@ -80,6 +84,21 @@ __acquires(ohci->lock)
                ohci->hc_control &= ~(OHCI_CTRL_PLE|OHCI_CTRL_IE);
                ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
        }
+
+       /*
+        * An isochronous URB that is submitted too late won't have any TDs
+        * (marked by the fact that the td_cnt value is larger than the
+        * actual number of TDs).  If the next URB on this endpoint is like
+        * that, give it back now.
+        */
+       if (!list_empty(&ep->urb_list)) {
+               urb = list_first_entry(&ep->urb_list, struct urb, urb_list);
+               urb_priv = urb->hcpriv;
+               if (urb_priv->td_cnt > urb_priv->length) {
+                       status = 0;
+                       goto restart;
+               }
+       }
 }
 
 
@@ -546,7 +565,6 @@ td_fill (struct ohci_hcd *ohci, u32 info,
                td->hwCBP = cpu_to_hc32 (ohci, data & 0xFFFFF000);
                *ohci_hwPSWp(ohci, td, 0) = cpu_to_hc16 (ohci,
                                                (data & 0x0FFF) | 0xE000);
-               td->ed->last_iso = info & 0xffff;
        } else {
                td->hwCBP = cpu_to_hc32 (ohci, data);
        }
@@ -996,7 +1014,7 @@ rescan_this:
                        urb_priv->td_cnt++;
 
                        /* if URB is done, clean up */
-                       if (urb_priv->td_cnt == urb_priv->length) {
+                       if (urb_priv->td_cnt >= urb_priv->length) {
                                modified = completed = 1;
                                finish_urb(ohci, urb, 0);
                        }
@@ -1086,7 +1104,7 @@ static void takeback_td(struct ohci_hcd *ohci, struct td *td)
        urb_priv->td_cnt++;
 
        /* If all this urb's TDs are done, call complete() */
-       if (urb_priv->td_cnt == urb_priv->length)
+       if (urb_priv->td_cnt >= urb_priv->length)
                finish_urb(ohci, urb, status);
 
        /* clean schedule:  unlink EDs that are no longer busy */
index c300bd2f7d1c53e0ee684b0c6f9cab185fb1c55c..0f228c46eedaab97c9351f784b8604d6144bb517 100644 (file)
@@ -293,7 +293,7 @@ static struct pci_driver uhci_pci_driver = {
        .remove =       usb_hcd_pci_remove,
        .shutdown =     uhci_shutdown,
 
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_PM
        .driver =       {
                .pm =   &usb_hcd_pci_pm_ops
        },
index 041c6ddb695c8ec6fa17d371fe7904de2e47d5ab..da6f56d996ce56f61cd1be20a093c2c16ff54702 100644 (file)
@@ -1303,7 +1303,7 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
                }
 
                /* Fell behind? */
-               if (uhci_frame_before_eq(frame, next)) {
+               if (!uhci_frame_before_eq(next, frame)) {
 
                        /* USB_ISO_ASAP: Round up to the first available slot */
                        if (urb->transfer_flags & URB_ISO_ASAP)
@@ -1311,13 +1311,17 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
                                                -qh->period;
 
                        /*
-                        * Not ASAP: Use the next slot in the stream.  If
-                        * the entire URB falls before the threshold, fail.
+                        * Not ASAP: Use the next slot in the stream,
+                        * no matter what.
                         */
                        else if (!uhci_frame_before_eq(next,
                                        frame + (urb->number_of_packets - 1) *
                                                qh->period))
-                               return -EXDEV;
+                               dev_dbg(uhci_dev(uhci), "iso underrun %p (%u+%u < %u)\n",
+                                               urb, frame,
+                                               (urb->number_of_packets - 1) *
+                                                       qh->period,
+                                               next);
                }
        }
 
index fae697ed0b708352e06e6e6135a2b49ece4ea34a..773a6b28c4f1657670547265e0d73caed6366907 100644 (file)
@@ -287,7 +287,7 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
                if (virt_dev->eps[i].ring && virt_dev->eps[i].ring->dequeue)
                        xhci_queue_stop_endpoint(xhci, slot_id, i, suspend);
        }
-       cmd->command_trb = xhci->cmd_ring->enqueue;
+       cmd->command_trb = xhci_find_next_enqueue(xhci->cmd_ring);
        list_add_tail(&cmd->cmd_list, &virt_dev->cmd_list);
        xhci_queue_stop_endpoint(xhci, slot_id, 0, suspend);
        xhci_ring_cmd_db(xhci);
@@ -552,11 +552,15 @@ void xhci_del_comp_mod_timer(struct xhci_hcd *xhci, u32 status, u16 wIndex)
  *  - Mark a port as being done with device resume,
  *    and ring the endpoint doorbells.
  *  - Stop the Synopsys redriver Compliance Mode polling.
+ *  - Drop and reacquire the xHCI lock, in order to wait for port resume.
  */
 static u32 xhci_get_port_status(struct usb_hcd *hcd,
                struct xhci_bus_state *bus_state,
                __le32 __iomem **port_array,
-               u16 wIndex, u32 raw_port_status)
+               u16 wIndex, u32 raw_port_status,
+               unsigned long flags)
+       __releases(&xhci->lock)
+       __acquires(&xhci->lock)
 {
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        u32 status = 0;
@@ -591,21 +595,42 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
                        return 0xffffffff;
                if (time_after_eq(jiffies,
                                        bus_state->resume_done[wIndex])) {
+                       int time_left;
+
                        xhci_dbg(xhci, "Resume USB2 port %d\n",
                                        wIndex + 1);
                        bus_state->resume_done[wIndex] = 0;
                        clear_bit(wIndex, &bus_state->resuming_ports);
+
+                       set_bit(wIndex, &bus_state->rexit_ports);
                        xhci_set_link_state(xhci, port_array, wIndex,
                                        XDEV_U0);
-                       xhci_dbg(xhci, "set port %d resume\n",
-                                       wIndex + 1);
-                       slot_id = xhci_find_slot_id_by_port(hcd, xhci,
-                                       wIndex + 1);
-                       if (!slot_id) {
-                               xhci_dbg(xhci, "slot_id is zero\n");
-                               return 0xffffffff;
+
+                       spin_unlock_irqrestore(&xhci->lock, flags);
+                       time_left = wait_for_completion_timeout(
+                                       &bus_state->rexit_done[wIndex],
+                                       msecs_to_jiffies(
+                                               XHCI_MAX_REXIT_TIMEOUT));
+                       spin_lock_irqsave(&xhci->lock, flags);
+
+                       if (time_left) {
+                               slot_id = xhci_find_slot_id_by_port(hcd,
+                                               xhci, wIndex + 1);
+                               if (!slot_id) {
+                                       xhci_dbg(xhci, "slot_id is zero\n");
+                                       return 0xffffffff;
+                               }
+                               xhci_ring_device(xhci, slot_id);
+                       } else {
+                               int port_status = xhci_readl(xhci,
+                                               port_array[wIndex]);
+                               xhci_warn(xhci, "Port resume took longer than %i msec, port status = 0x%x\n",
+                                               XHCI_MAX_REXIT_TIMEOUT,
+                                               port_status);
+                               status |= USB_PORT_STAT_SUSPEND;
+                               clear_bit(wIndex, &bus_state->rexit_ports);
                        }
-                       xhci_ring_device(xhci, slot_id);
+
                        bus_state->port_c_suspend |= 1 << wIndex;
                        bus_state->suspended_ports &= ~(1 << wIndex);
                } else {
@@ -728,7 +753,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
                        break;
                }
                status = xhci_get_port_status(hcd, bus_state, port_array,
-                               wIndex, temp);
+                               wIndex, temp, flags);
                if (status == 0xffffffff)
                        goto error;
 
index 53b972c2a09f10be38a474042d03757618bc76a4..83bcd13622c3466e655a00166bf6e8076ae198bc 100644 (file)
@@ -2428,6 +2428,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
        for (i = 0; i < USB_MAXCHILDREN; ++i) {
                xhci->bus_state[0].resume_done[i] = 0;
                xhci->bus_state[1].resume_done[i] = 0;
+               /* Only the USB 2.0 completions will ever be used. */
+               init_completion(&xhci->bus_state[1].rexit_done[i]);
        }
 
        if (scratchpad_alloc(xhci, flags))
index c2d495057eb538a74db9a01fc69af12442ca8d45..236c3aabe94083ab5c1829241a46a20d39829587 100644 (file)
@@ -351,7 +351,7 @@ static struct pci_driver xhci_pci_driver = {
        /* suspend and resume implemented later */
 
        .shutdown =     usb_hcd_pci_shutdown,
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_PM
        .driver = {
                .pm = &usb_hcd_pci_pm_ops
        },
index 411da1fc7ae8ad0550df9cecb8d5a4bdab0fa543..6bfbd80ec2b9edfa0a079767381d7d733d8c7035 100644 (file)
@@ -123,6 +123,16 @@ static int enqueue_is_link_trb(struct xhci_ring *ring)
        return TRB_TYPE_LINK_LE32(link->control);
 }
 
+union xhci_trb *xhci_find_next_enqueue(struct xhci_ring *ring)
+{
+       /* The enqueue pointer can be left pointing to the link TRB;
+        * we must handle that case.
+        */
+       if (TRB_TYPE_LINK_LE32(ring->enqueue->link.control))
+               return ring->enq_seg->next->trbs;
+       return ring->enqueue;
+}
+
 /* Updates trb to point to the next TRB in the ring, and updates seg if the next
  * TRB is in a new segment.  This does not skip over link TRBs, and it does not
 * affect the ring dequeue or enqueue pointers.
@@ -859,8 +869,12 @@ remove_finished_td:
                /* Otherwise ring the doorbell(s) to restart queued transfers */
                ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
        }
-       ep->stopped_td = NULL;
-       ep->stopped_trb = NULL;
+
+       /* Clear stopped_td and stopped_trb if endpoint is not halted */
+       if (!(ep->ep_state & EP_HALTED)) {
+               ep->stopped_td = NULL;
+               ep->stopped_trb = NULL;
+       }
 
        /*
         * Drop the lock and complete the URBs in the cancelled TD list.
@@ -1414,6 +1428,12 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
                        inc_deq(xhci, xhci->cmd_ring);
                        return;
                }
+               /* There is no command to handle if we get a stop event when the
+                * command ring is empty; event->cmd_trb points to the next
+                * unset command.
+                */
+               if (xhci->cmd_ring->dequeue == xhci->cmd_ring->enqueue)
+                       return;
        }
 
        switch (le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])
@@ -1743,6 +1763,19 @@ static void handle_port_status(struct xhci_hcd *xhci,
                }
        }
 
+       /*
+        * Check to see if xhci-hub.c is waiting on RExit to U0 transition (or
+        * RExit to a disconnect state).  If so, let the driver know it's
+        * out of the RExit state.
+        */
+       if (!DEV_SUPERSPEED(temp) &&
+                       test_and_clear_bit(faked_port_index,
+                               &bus_state->rexit_ports)) {
+               complete(&bus_state->rexit_done[faked_port_index]);
+               bogus_port_status = true;
+               goto cleanup;
+       }
+
        if (hcd->speed != HCD_USB3)
                xhci_test_and_clear_bit(xhci, port_array, faked_port_index,
                                        PORT_PLC);
index 49b6edb84a79eccfd5f935db8e03bad0957ee5ad..1e36dbb4836693dbe5ac90b8231361f82645a7b1 100644 (file)
@@ -2598,15 +2598,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
        if (command) {
                cmd_completion = command->completion;
                cmd_status = &command->status;
-               command->command_trb = xhci->cmd_ring->enqueue;
-
-               /* Enqueue pointer can be left pointing to the link TRB,
-                * we must handle that
-                */
-               if (TRB_TYPE_LINK_LE32(command->command_trb->link.control))
-                       command->command_trb =
-                               xhci->cmd_ring->enq_seg->next->trbs;
-
+               command->command_trb = xhci_find_next_enqueue(xhci->cmd_ring);
                list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
        } else {
                cmd_completion = &virt_dev->cmd_completion;
@@ -2614,7 +2606,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
        }
        init_completion(cmd_completion);
 
-       cmd_trb = xhci->cmd_ring->dequeue;
+       cmd_trb = xhci_find_next_enqueue(xhci->cmd_ring);
        if (!ctx_change)
                ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
                                udev->slot_id, must_succeed);
@@ -3439,14 +3431,7 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
 
        /* Attempt to submit the Reset Device command to the command ring */
        spin_lock_irqsave(&xhci->lock, flags);
-       reset_device_cmd->command_trb = xhci->cmd_ring->enqueue;
-
-       /* Enqueue pointer can be left pointing to the link TRB,
-        * we must handle that
-        */
-       if (TRB_TYPE_LINK_LE32(reset_device_cmd->command_trb->link.control))
-               reset_device_cmd->command_trb =
-                       xhci->cmd_ring->enq_seg->next->trbs;
+       reset_device_cmd->command_trb = xhci_find_next_enqueue(xhci->cmd_ring);
 
        list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list);
        ret = xhci_queue_reset_device(xhci, slot_id);
@@ -3650,7 +3635,7 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
        union xhci_trb *cmd_trb;
 
        spin_lock_irqsave(&xhci->lock, flags);
-       cmd_trb = xhci->cmd_ring->dequeue;
+       cmd_trb = xhci_find_next_enqueue(xhci->cmd_ring);
        ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
        if (ret) {
                spin_unlock_irqrestore(&xhci->lock, flags);
@@ -3785,7 +3770,7 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
                                slot_ctx->dev_info >> 27);
 
        spin_lock_irqsave(&xhci->lock, flags);
-       cmd_trb = xhci->cmd_ring->dequeue;
+       cmd_trb = xhci_find_next_enqueue(xhci->cmd_ring);
        ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma,
                                        udev->slot_id);
        if (ret) {
index 46aa14894148962e3aff6c8028f8d07cbcb07951..289fbfbae7463f429ef051602e08e7eccb3e06e8 100644 (file)
@@ -1412,8 +1412,18 @@ struct xhci_bus_state {
        unsigned long           resume_done[USB_MAXCHILDREN];
        /* which ports have started to resume */
        unsigned long           resuming_ports;
+       /* Which ports are waiting on RExit to U0 transition. */
+       unsigned long           rexit_ports;
+       struct completion       rexit_done[USB_MAXCHILDREN];
 };
 
+
+/*
+ * It can take up to 20 ms to transition from RExit to U0 on the
+ * Intel Lynx Point LP xHCI host.
+ */
+#define        XHCI_MAX_REXIT_TIMEOUT  (20 * 1000)
+
 static inline unsigned int hcd_index(struct usb_hcd *hcd)
 {
        if (hcd->speed == HCD_USB3)
@@ -1840,6 +1850,7 @@ int xhci_cancel_cmd(struct xhci_hcd *xhci, struct xhci_command *command,
                union xhci_trb *cmd_trb);
 void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id,
                unsigned int ep_index, unsigned int stream_id);
+union xhci_trb *xhci_find_next_enqueue(struct xhci_ring *ring);
 
 /* xHCI roothub code */
 void xhci_set_link_state(struct xhci_hcd *xhci, __le32 __iomem **port_array,
index fc15694d3031bbf83b76e932914e44251280d491..4e8a0405f956c478491a1ed6f18d65a9afc07e9a 100644 (file)
@@ -79,7 +79,7 @@ static struct usb_dpll_params *omap_usb3_get_dpll_params(unsigned long rate)
                        return &dpll_map[i].params;
        }
 
-       return 0;
+       return NULL;
 }
 
 static int omap_usb3_suspend(struct usb_phy *x, int suspend)
index c454bfa22a106184bbaee567e4f49fbad5d8d057..ddb9c51f2c999c04a27aab9d7a2924c429205672 100644 (file)
@@ -60,7 +60,7 @@ config USB_SERIAL_SIMPLE
                - Suunto ANT+ USB device.
                - Fundamental Software dongle.
                - HP4x calculators
-               - a number of Motoroloa phones
+               - a number of Motorola phones
                - Siemens USB/MPI adapter.
                - ViVOtech ViVOpay USB device.
                - Infineon Modem Flashloader USB interface
index e7a84f0f517969fec4bbb59ac270eeb97fbf22d3..bedf8e47713be02dfc80b8a2e3512cb24a455a15 100644 (file)
@@ -139,6 +139,7 @@ enum pl2303_type {
        HX_TA,          /* HX(A) / X(A) / TA version  */ /* TODO: improve */
        HXD_EA_RA_SA,   /* HXD / EA / RA / SA version */ /* TODO: improve */
        TB,             /* TB version */
+       HX_CLONE,       /* Cheap and less functional clone of the HX chip */
 };
 /*
  * NOTE: don't know the difference between type 0 and type 1,
@@ -206,8 +207,23 @@ static int pl2303_startup(struct usb_serial *serial)
                 * the device descriptors of the X/HX, HXD, EA, RA, SA, TA, TB
                 */
                if (le16_to_cpu(serial->dev->descriptor.bcdDevice) == 0x300) {
-                       type = HX_TA;
-                       type_str = "X/HX/TA";
+                       /* Check if the device is a clone */
+                       pl2303_vendor_read(0x9494, 0, serial, buf);
+                       /*
+                        * NOTE: Not sure if this read is really needed.
+                        * The HX returns 0x00, the clone 0x02, but the Windows
+                        * driver seems to ignore the value and continues.
+                        */
+                       pl2303_vendor_write(0x0606, 0xaa, serial);
+                       pl2303_vendor_read(0x8686, 0, serial, buf);
+                       if (buf[0] != 0xaa) {
+                               type = HX_CLONE;
+                               type_str = "X/HX clone (limited functionality)";
+                       } else {
+                               type = HX_TA;
+                               type_str = "X/HX/TA";
+                       }
+                       pl2303_vendor_write(0x0606, 0x00, serial);
                } else if (le16_to_cpu(serial->dev->descriptor.bcdDevice)
                                                                     == 0x400) {
                        type = HXD_EA_RA_SA;
@@ -305,8 +321,9 @@ static int pl2303_baudrate_encode_direct(int baud, enum pl2303_type type,
 {
        /*
         * NOTE: Only the values defined in baud_sup are supported !
-        *       => if unsupported values are set, the PL2303 seems to
-        *          use 9600 baud (at least my PL2303X always does)
+        * => if unsupported values are set, the PL2303 uses 9600 baud instead
+        * => HX clones just don't work at unsupported baud rates < 115200 baud,
+        *    for baud rates > 115200 they run at 115200 baud
         */
        const int baud_sup[] = { 75, 150, 300, 600, 1200, 1800, 2400, 3600,
                                 4800, 7200, 9600, 14400, 19200, 28800, 38400,
@@ -316,14 +333,14 @@ static int pl2303_baudrate_encode_direct(int baud, enum pl2303_type type,
         * NOTE: With the exception of type_0/1 devices, the following
         * additional baud rates are supported (tested with HX rev. 3A only):
         * 110*, 56000*, 128000, 134400, 161280, 201600, 256000*, 268800,
-        * 403200, 806400.      (*: not HX)
+        * 403200, 806400.      (*: not HX and HX clones)
         *
         * Maximum values: HXD, TB: 12000000; HX, TA: 6000000;
-        *                 type_0+1: 1228800; RA: 921600; SA: 115200
+        *                 type_0+1: 1228800; RA: 921600; HX clones, SA: 115200
         *
         * As long as we are not using this encoding method for anything else
-        * than the type_0+1 and HX chips, there is no point in complicating
-        * the code to support them.
+        * than the type_0+1, HX and HX clone chips, there is no point in
+        * complicating the code to support them.
         */
        int i;
 
@@ -347,6 +364,8 @@ static int pl2303_baudrate_encode_direct(int baud, enum pl2303_type type,
                baud = min_t(int, baud, 6000000);
        else if (type == type_0 || type == type_1)
                baud = min_t(int, baud, 1228800);
+       else if (type == HX_CLONE)
+               baud = min_t(int, baud, 115200);
        /* Direct (standard) baud rate encoding method */
        put_unaligned_le32(baud, buf);
 
@@ -359,7 +378,8 @@ static int pl2303_baudrate_encode_divisor(int baud, enum pl2303_type type,
        /*
         * Divisor based baud rate encoding method
         *
-        * NOTE: it's not clear if the type_0/1 chips support this method
+        * NOTE: HX clones do NOT support this method.
+        * It's not clear if the type_0/1 chips support it.
         *
         * divisor = 12MHz * 32 / baudrate = 2^A * B
         *
@@ -452,7 +472,7 @@ static void pl2303_encode_baudrate(struct tty_struct *tty,
         * 1) Direct method: encodes the baud rate value directly
         *    => supported by all chip types
         * 2) Divisor based method: encodes a divisor to a base value (12MHz*32)
-        *    => supported by HX chips (and likely not by type_0/1 chips)
+        *    => not supported by HX clones (and likely type_0/1 chips)
         *
         * NOTE: Although the divisor based baud rate encoding method is much
         * more flexible, some of the standard baud rate values can not be
@@ -460,7 +480,7 @@ static void pl2303_encode_baudrate(struct tty_struct *tty,
         * the device likely uses the same baud rate generator for both methods
         * so that there is likely no difference.
         */
-       if (type == type_0 || type == type_1)
+       if (type == type_0 || type == type_1 || type == HX_CLONE)
                baud = pl2303_baudrate_encode_direct(baud, type, buf);
        else
                baud = pl2303_baudrate_encode_divisor(baud, type, buf);
@@ -813,6 +833,7 @@ static void pl2303_break_ctl(struct tty_struct *tty, int break_state)
        result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
                                 BREAK_REQUEST, BREAK_REQUEST_TYPE, state,
                                 0, NULL, 0, 100);
+       /* NOTE: HX clones don't support sending breaks, -EPIPE is returned */
        if (result)
                dev_err(&port->dev, "error sending break = %d\n", result);
 }
index 4b79a1f2f901e86eb7726fd64d1af3bac66e7385..592b31698fc8bf7c8033b8eb4f15d98b2b708ad2 100644 (file)
@@ -461,7 +461,7 @@ static void tcm_vhost_release_cmd(struct se_cmd *se_cmd)
                u32 i;
                for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
                        put_page(sg_page(&tv_cmd->tvc_sgl[i]));
-        }
+       }
 
        tcm_vhost_put_inflight(tv_cmd->inflight);
        percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
@@ -1373,21 +1373,30 @@ static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
        return 0;
 }
 
+static void vhost_scsi_free(struct vhost_scsi *vs)
+{
+       if (is_vmalloc_addr(vs))
+               vfree(vs);
+       else
+               kfree(vs);
+}
+
 static int vhost_scsi_open(struct inode *inode, struct file *f)
 {
        struct vhost_scsi *vs;
        struct vhost_virtqueue **vqs;
-       int r, i;
+       int r = -ENOMEM, i;
 
-       vs = kzalloc(sizeof(*vs), GFP_KERNEL);
-       if (!vs)
-               return -ENOMEM;
+       vs = kzalloc(sizeof(*vs), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
+       if (!vs) {
+               vs = vzalloc(sizeof(*vs));
+               if (!vs)
+                       goto err_vs;
+       }
 
        vqs = kmalloc(VHOST_SCSI_MAX_VQ * sizeof(*vqs), GFP_KERNEL);
-       if (!vqs) {
-               kfree(vs);
-               return -ENOMEM;
-       }
+       if (!vqs)
+               goto err_vqs;
 
        vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
        vhost_work_init(&vs->vs_event_work, tcm_vhost_evt_work);
@@ -1407,14 +1416,18 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
 
        tcm_vhost_init_inflight(vs, NULL);
 
-       if (r < 0) {
-               kfree(vqs);
-               kfree(vs);
-               return r;
-       }
+       if (r < 0)
+               goto err_init;
 
        f->private_data = vs;
        return 0;
+
+err_init:
+       kfree(vqs);
+err_vqs:
+       vhost_scsi_free(vs);
+err_vs:
+       return r;
 }
 
 static int vhost_scsi_release(struct inode *inode, struct file *f)
@@ -1431,7 +1444,7 @@ static int vhost_scsi_release(struct inode *inode, struct file *f)
        /* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
        vhost_scsi_flush(vs);
        kfree(vs->dev.vqs);
-       kfree(vs);
+       vhost_scsi_free(vs);
        return 0;
 }
 
index 9a9502a4aa5089519d338ca62b07a557069fb00d..69068e0d8f31af183c075afe3026d60b09a0b55e 100644 (file)
@@ -161,9 +161,11 @@ void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
        if (list_empty(&work->node)) {
                list_add_tail(&work->node, &dev->work_list);
                work->queue_seq++;
+               spin_unlock_irqrestore(&dev->work_lock, flags);
                wake_up_process(dev->worker);
+       } else {
+               spin_unlock_irqrestore(&dev->work_lock, flags);
        }
-       spin_unlock_irqrestore(&dev->work_lock, flags);
 }
 EXPORT_SYMBOL_GPL(vhost_work_queue);
 
index 75dca19bf2149a968f733ed1ff8a0cd468baa745..6ac755270ab46d1a3ef830c89338c0daab519664 100644 (file)
@@ -514,7 +514,7 @@ static int mmphw_probe(struct platform_device *pdev)
        if (IS_ERR(ctrl->clk)) {
                dev_err(ctrl->dev, "unable to get clk %s\n", mi->clk_name);
                ret = -ENOENT;
-               goto failed_get_clk;
+               goto failed;
        }
        clk_prepare_enable(ctrl->clk);
 
@@ -551,21 +551,8 @@ failed_path_init:
                path_deinit(path_plat);
        }
 
-       if (ctrl->clk) {
-               devm_clk_put(ctrl->dev, ctrl->clk);
-               clk_disable_unprepare(ctrl->clk);
-       }
-failed_get_clk:
-       devm_free_irq(ctrl->dev, ctrl->irq, ctrl);
+       clk_disable_unprepare(ctrl->clk);
 failed:
-       if (ctrl) {
-               if (ctrl->reg_base)
-                       devm_iounmap(ctrl->dev, ctrl->reg_base);
-               devm_release_mem_region(ctrl->dev, res->start,
-                               resource_size(res));
-               devm_kfree(ctrl->dev, ctrl);
-       }
-
        dev_err(&pdev->dev, "device init failed\n");
 
        return ret;
index d250ed0f806d3bf66d858e9f633852f3baa58f4d..27197a8048c0a7aa9ffcbd3c8a2280ae12866dcc 100644 (file)
@@ -620,6 +620,7 @@ static int mxsfb_restore_mode(struct mxsfb_info *host)
                break;
        case 3:
                bits_per_pixel = 32;
+               break;
        case 1:
        default:
                return -EINVAL;
index 7ef079c146e7242c9edee33cc43bc0cd3c850ce1..c172a5281f9e6c9369b0cec236e3bbf2abb875ac 100644 (file)
@@ -2075,6 +2075,7 @@ static int neofb_probe(struct pci_dev *dev, const struct pci_device_id *id)
        if (!fb_find_mode(&info->var, info, mode_option, NULL, 0,
                        info->monspecs.modedb, 16)) {
                printk(KERN_ERR "neofb: Unable to find usable video mode.\n");
+               err = -EINVAL;
                goto err_map_video;
        }
 
@@ -2097,7 +2098,8 @@ static int neofb_probe(struct pci_dev *dev, const struct pci_device_id *id)
               info->fix.smem_len >> 10, info->var.xres,
               info->var.yres, h_sync / 1000, h_sync % 1000, v_sync);
 
-       if (fb_alloc_cmap(&info->cmap, 256, 0) < 0)
+       err = fb_alloc_cmap(&info->cmap, 256, 0);
+       if (err < 0)
                goto err_map_video;
 
        err = register_framebuffer(info);
index 171821ddd78de381a866e9912bece023a8fcaca5..ba5b40f581f6fdfe1849a3d4d62c8f38ea65dab1 100644 (file)
@@ -120,7 +120,7 @@ int of_get_display_timing(struct device_node *np, const char *name,
                return -EINVAL;
        }
 
-       timing_np = of_find_node_by_name(np, name);
+       timing_np = of_get_child_by_name(np, name);
        if (!timing_np) {
                pr_err("%s: could not find node '%s'\n",
                        of_node_full_name(np), name);
@@ -143,11 +143,11 @@ struct display_timings *of_get_display_timings(struct device_node *np)
        struct display_timings *disp;
 
        if (!np) {
-               pr_err("%s: no devicenode given\n", of_node_full_name(np));
+               pr_err("%s: no device node given\n", of_node_full_name(np));
                return NULL;
        }
 
-       timings_np = of_find_node_by_name(np, "display-timings");
+       timings_np = of_get_child_by_name(np, "display-timings");
        if (!timings_np) {
                pr_err("%s: could not find display-timings node\n",
                        of_node_full_name(np));
index 6c90885b094020f33b74d591db6a93edcbd4aeb7..10b25e7cd878c6e7a5657fd8c1a4865b0935c004 100644 (file)
@@ -35,6 +35,7 @@ config DISPLAY_PANEL_DPI
 
 config DISPLAY_PANEL_DSI_CM
        tristate "Generic DSI Command Mode Panel"
+       depends on BACKLIGHT_CLASS_DEVICE
        help
          Driver for generic DSI command mode panels.
 
index 1b60698f141ed983e2661743f2a5c79f471a02a6..ccd9073f706f6d59242a5b1866d74dcd2c9bffa6 100644 (file)
@@ -191,7 +191,7 @@ static int tvc_probe_pdata(struct platform_device *pdev)
        in = omap_dss_find_output(pdata->source);
        if (in == NULL) {
                dev_err(&pdev->dev, "Failed to find video source\n");
-               return -ENODEV;
+               return -EPROBE_DEFER;
        }
 
        ddata->in = in;
index bc5f8ceda371b1b26308db7c644c6913445c8b18..63d88ee6dfe410469215455795bd4b63b031a040 100644 (file)
@@ -263,7 +263,7 @@ static int dvic_probe_pdata(struct platform_device *pdev)
        in = omap_dss_find_output(pdata->source);
        if (in == NULL) {
                dev_err(&pdev->dev, "Failed to find video source\n");
-               return -ENODEV;
+               return -EPROBE_DEFER;
        }
 
        ddata->in = in;
index c5826716d6abbb8b28e85ccd1d70aaa8c0058b44..9abe2c039ae9c44f0ea2cb2a3362c021e9273df4 100644 (file)
@@ -290,7 +290,7 @@ static int hdmic_probe_pdata(struct platform_device *pdev)
        in = omap_dss_find_output(pdata->source);
        if (in == NULL) {
                dev_err(&pdev->dev, "Failed to find video source\n");
-               return -ENODEV;
+               return -EPROBE_DEFER;
        }
 
        ddata->in = in;
index 02a7340111dfbf4b856122aae670bdbff2c34150..477975009eee87e89c34cc773e2d5a8818f918d6 100644 (file)
@@ -3691,6 +3691,7 @@ static int __init omap_dispchw_probe(struct platform_device *pdev)
        }
 
        pm_runtime_enable(&pdev->dev);
+       pm_runtime_irq_safe(&pdev->dev);
 
        r = dispc_runtime_get();
        if (r)
index 47ca86c5c6c0b3d1d609a001210e5eb132616076..d838ba829459400acc86388cbeda1bab77f57b24 100644 (file)
@@ -1336,14 +1336,7 @@ static int s3_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
                        (info->var.bits_per_pixel * info->var.xres_virtual);
        if (info->var.yres_virtual < info->var.yres) {
                dev_err(info->device, "virtual vertical size smaller than real\n");
-               goto err_find_mode;
-       }
-
-       /* maximize virtual vertical size for fast scrolling */
-       info->var.yres_virtual = info->fix.smem_len * 8 /
-                       (info->var.bits_per_pixel * info->var.xres_virtual);
-       if (info->var.yres_virtual < info->var.yres) {
-               dev_err(info->device, "virtual vertical size smaller than real\n");
+               rc = -EINVAL;
                goto err_find_mode;
        }
 
index a50c6e3a7cc4824db07f43e84732ee64a58b65cb..b232908a61925724bb61bc0f8a09ecbca6b753e6 100644 (file)
@@ -398,8 +398,6 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
        if (nr_pages > ARRAY_SIZE(frame_list))
                nr_pages = ARRAY_SIZE(frame_list);
 
-       scratch_page = get_balloon_scratch_page();
-
        for (i = 0; i < nr_pages; i++) {
                page = alloc_page(gfp);
                if (page == NULL) {
@@ -413,6 +411,12 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 
                scrub_page(page);
 
+               /*
+                * Ballooned out frames are effectively replaced with
+                * a scratch frame.  Ensure direct mappings and the
+                * p2m are consistent.
+                */
+               scratch_page = get_balloon_scratch_page();
 #ifdef CONFIG_XEN_HAVE_PVMMU
                if (xen_pv_domain() && !PageHighMem(page)) {
                        ret = HYPERVISOR_update_va_mapping(
@@ -422,24 +426,19 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
                        BUG_ON(ret);
                }
 #endif
-       }
-
-       /* Ensure that ballooned highmem pages don't have kmaps. */
-       kmap_flush_unused();
-       flush_tlb_all();
-
-       /* No more mappings: invalidate P2M and add to balloon. */
-       for (i = 0; i < nr_pages; i++) {
-               pfn = mfn_to_pfn(frame_list[i]);
                if (!xen_feature(XENFEAT_auto_translated_physmap)) {
                        unsigned long p;
                        p = page_to_pfn(scratch_page);
                        __set_phys_to_machine(pfn, pfn_to_mfn(p));
                }
+               put_balloon_scratch_page();
+
                balloon_append(pfn_to_page(pfn));
        }
 
-       put_balloon_scratch_page();
+       /* Ensure that ballooned highmem pages don't have kmaps. */
+       kmap_flush_unused();
+       flush_tlb_all();
 
        set_xen_guest_handle(reservation.extent_start, frame_list);
        reservation.nr_extents   = nr_pages;
index 646337dc5201e702227309cc17db99f96af99173..529300327f4574d2dc36345be3c7398442548e08 100644 (file)
@@ -600,9 +600,6 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
 
        /* lock down the parent dentry so we can peer at it */
        parent = dget_parent(dentry);
-       if (!parent->d_inode)
-               goto out_bad;
-
        dir = AFS_FS_I(parent->d_inode);
 
        /* validate the parent directory */
index 100edcc5e3122323eb8f4087305e623c666eceb0..4c94a79991bb6d8ae0d2e12ae8c647e2fe22f7fe 100644 (file)
@@ -1413,7 +1413,7 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
  *   long file_ofs
  * followed by COUNT filenames in ASCII: "FILE1" NUL "FILE2" NUL...
  */
-static void fill_files_note(struct memelfnote *note)
+static int fill_files_note(struct memelfnote *note)
 {
        struct vm_area_struct *vma;
        unsigned count, size, names_ofs, remaining, n;
@@ -1428,11 +1428,11 @@ static void fill_files_note(struct memelfnote *note)
        names_ofs = (2 + 3 * count) * sizeof(data[0]);
  alloc:
        if (size >= MAX_FILE_NOTE_SIZE) /* paranoia check */
-               goto err;
+               return -EINVAL;
        size = round_up(size, PAGE_SIZE);
        data = vmalloc(size);
        if (!data)
-               goto err;
+               return -ENOMEM;
 
        start_end_ofs = data + 2;
        name_base = name_curpos = ((char *)data) + names_ofs;
@@ -1485,7 +1485,7 @@ static void fill_files_note(struct memelfnote *note)
 
        size = name_curpos - (char *)data;
        fill_note(note, "CORE", NT_FILE, size, data);
- err: ;
+               return 0;
 }
 
 #ifdef CORE_DUMP_USE_REGSET
@@ -1686,8 +1686,8 @@ static int fill_note_info(struct elfhdr *elf, int phdrs,
        fill_auxv_note(&info->auxv, current->mm);
        info->size += notesize(&info->auxv);
 
-       fill_files_note(&info->files);
-       info->size += notesize(&info->files);
+       if (fill_files_note(&info->files) == 0)
+               info->size += notesize(&info->files);
 
        return 1;
 }
@@ -1719,7 +1719,8 @@ static int write_note_info(struct elf_note_info *info,
                        return 0;
                if (first && !writenote(&info->auxv, file, foffset))
                        return 0;
-               if (first && !writenote(&info->files, file, foffset))
+               if (first && info->files.data &&
+                               !writenote(&info->files, file, foffset))
                        return 0;
 
                for (i = 1; i < info->thread_notes; ++i)
@@ -1806,6 +1807,7 @@ static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
 
 struct elf_note_info {
        struct memelfnote *notes;
+       struct memelfnote *notes_files;
        struct elf_prstatus *prstatus;  /* NT_PRSTATUS */
        struct elf_prpsinfo *psinfo;    /* NT_PRPSINFO */
        struct list_head thread_list;
@@ -1896,9 +1898,12 @@ static int fill_note_info(struct elfhdr *elf, int phdrs,
 
        fill_siginfo_note(info->notes + 2, &info->csigdata, siginfo);
        fill_auxv_note(info->notes + 3, current->mm);
-       fill_files_note(info->notes + 4);
+       info->numnote = 4;
 
-       info->numnote = 5;
+       if (fill_files_note(info->notes + info->numnote) == 0) {
+               info->notes_files = info->notes + info->numnote;
+               info->numnote++;
+       }
 
        /* Try to dump the FPU. */
        info->prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs,
@@ -1960,8 +1965,9 @@ static void free_note_info(struct elf_note_info *info)
                kfree(list_entry(tmp, struct elf_thread_status, list));
        }
 
-       /* Free data allocated by fill_files_note(): */
-       vfree(info->notes[4].data);
+       /* Free data possibly allocated by fill_files_note(): */
+       if (info->notes_files)
+               vfree(info->notes_files->data);
 
        kfree(info->prstatus);
        kfree(info->psinfo);
@@ -2044,7 +2050,7 @@ static int elf_core_dump(struct coredump_params *cprm)
        struct vm_area_struct *vma, *gate_vma;
        struct elfhdr *elf = NULL;
        loff_t offset = 0, dataoff, foffset;
-       struct elf_note_info info;
+       struct elf_note_info info = { };
        struct elf_phdr *phdr4note = NULL;
        struct elf_shdr *shdr4extnum = NULL;
        Elf_Half e_phnum;
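
Taken together, the binfmt_elf hunks above make the NT_FILE note optional: fill_files_note() now reports failure, the note is only counted and written out when its data was actually allocated, and the zero-initialized elf_note_info keeps the free path safe when it was not. A rough user-space sketch of that optional-blob pattern (names such as note_blob and fill_files are invented, not the kernel's):

/*
 * Minimal sketch of the "optional note" pattern: the fill step may fail,
 * the caller only accounts for the note on success, and the cleanup path
 * tolerates its absence.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct note_blob {
        void *data;
        size_t size;
};

static int fill_files(struct note_blob *n, size_t want)
{
        n->data = malloc(want);
        if (!n->data)
                return -1;              /* caller simply skips the note */
        memset(n->data, 0, want);
        n->size = want;
        return 0;
}

int main(void)
{
        struct note_blob files = { 0 }; /* zero-init, like "info = { }" above */
        size_t total = 0;

        if (fill_files(&files, 4096) == 0)
                total += files.size;    /* only counted on success */

        if (files.data)                 /* cleanup must tolerate absence */
                free(files.data);

        printf("dumped %zu bytes of notes\n", total);
        return 0;
}
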
index 60250847929fcd0421e5d72b9655bbe4c3a785e1..fc60b31453eefbbdcc234c7df78c5504da655980 100644 (file)
@@ -735,7 +735,7 @@ void bioset_integrity_free(struct bio_set *bs)
                mempool_destroy(bs->bio_integrity_pool);
 
        if (bs->bvec_integrity_pool)
-               mempool_destroy(bs->bio_integrity_pool);
+               mempool_destroy(bs->bvec_integrity_pool);
 }
 EXPORT_SYMBOL(bioset_integrity_free);
 
index b3b20ed9510e5ccc285195cce7aa7e3f524ed063..ea5035da4d9a0cd9fd6f5f657bb01272c63175c3 100644 (file)
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -917,8 +917,8 @@ void bio_copy_data(struct bio *dst, struct bio *src)
                src_p = kmap_atomic(src_bv->bv_page);
                dst_p = kmap_atomic(dst_bv->bv_page);
 
-               memcpy(dst_p + dst_bv->bv_offset,
-                      src_p + src_bv->bv_offset,
+               memcpy(dst_p + dst_offset,
+                      src_p + src_offset,
                       bytes);
 
                kunmap_atomic(dst_p);
index d0ae226926ee2d2f43d29220da8fd531f31e8687..71f074e1870b2e9fe183cf338ad2515314155e6e 100644 (file)
@@ -213,7 +213,10 @@ static inline bool btrfs_is_free_space_inode(struct inode *inode)
 static inline int btrfs_inode_in_log(struct inode *inode, u64 generation)
 {
        if (BTRFS_I(inode)->logged_trans == generation &&
-           BTRFS_I(inode)->last_sub_trans <= BTRFS_I(inode)->last_log_commit)
+           BTRFS_I(inode)->last_sub_trans <=
+           BTRFS_I(inode)->last_log_commit &&
+           BTRFS_I(inode)->last_sub_trans <=
+           BTRFS_I(inode)->root->last_log_commit)
                return 1;
        return 0;
 }
index 64346721173f24f31170a8e70e85f7d49a83b609..61b5bcd57b7e320624c2778d2051db03b79f2282 100644 (file)
@@ -1005,8 +1005,11 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
                return ret;
        }
 
-       if (root->ref_cows)
-               btrfs_reloc_cow_block(trans, root, buf, cow);
+       if (root->ref_cows) {
+               ret = btrfs_reloc_cow_block(trans, root, buf, cow);
+               if (ret)
+                       return ret;
+       }
 
        if (buf == root->node) {
                WARN_ON(parent && parent != buf);
index 3c1da6f98a4d25666f4a2afb6fce25bff80ac8c8..0506f40ede8331f8ab7b20a4d2778e49c886fe91 100644 (file)
@@ -1118,15 +1118,6 @@ struct btrfs_space_info {
         */
        struct percpu_counter total_bytes_pinned;
 
-       /*
-        * we bump reservation progress every time we decrement
-        * bytes_reserved.  This way people waiting for reservations
-        * know something good has happened and they can check
-        * for progress.  The number here isn't to be trusted, it
-        * just shows reclaim activity
-        */
-       unsigned long reservation_progress;
-
        unsigned int full:1;    /* indicates that we cannot allocate any more
                                   chunks for this space */
        unsigned int chunk_alloc:1;     /* set if we are allocating a chunk */
@@ -3135,7 +3126,7 @@ static inline u64 btrfs_calc_trans_metadata_size(struct btrfs_root *root,
                                                 unsigned num_items)
 {
        return (root->leafsize + root->nodesize * (BTRFS_MAX_LEVEL - 1)) *
-               3 * num_items;
+               2 * num_items;
 }
 
 /*
@@ -3939,9 +3930,9 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
                            struct btrfs_root *root);
 int btrfs_recover_relocation(struct btrfs_root *root);
 int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len);
-void btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
-                          struct btrfs_root *root, struct extent_buffer *buf,
-                          struct extent_buffer *cow);
+int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
+                         struct btrfs_root *root, struct extent_buffer *buf,
+                         struct extent_buffer *cow);
 void btrfs_reloc_pre_snapshot(struct btrfs_trans_handle *trans,
                              struct btrfs_pending_snapshot *pending,
                              u64 *bytes_to_reserve);
index a64435359385e86a483f30696c932da2c8d5bdb0..70681686e8dc57eb64b51429429150245babdb3b 100644 (file)
@@ -400,7 +400,7 @@ int btrfs_dev_replace_start(struct btrfs_root *root,
        args->result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR;
        btrfs_dev_replace_unlock(dev_replace);
 
-       btrfs_wait_all_ordered_extents(root->fs_info, 0);
+       btrfs_wait_all_ordered_extents(root->fs_info);
 
        /* force writing the updated state information to disk */
        trans = btrfs_start_transaction(root, 0);
@@ -475,7 +475,7 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
                mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
                return ret;
        }
-       btrfs_wait_all_ordered_extents(root->fs_info, 0);
+       btrfs_wait_all_ordered_extents(root->fs_info);
 
        trans = btrfs_start_transaction(root, 0);
        if (IS_ERR(trans)) {
index 4cbb00af92ff3bed86561b9f02ec6fbcc04665b2..4ae17ed13b3274f228c8879383ec5124ca5f05dd 100644 (file)
@@ -157,6 +157,7 @@ static struct btrfs_lockdep_keyset {
        { .id = BTRFS_TREE_LOG_OBJECTID,        .name_stem = "log"      },
        { .id = BTRFS_TREE_RELOC_OBJECTID,      .name_stem = "treloc"   },
        { .id = BTRFS_DATA_RELOC_TREE_OBJECTID, .name_stem = "dreloc"   },
+       { .id = BTRFS_UUID_TREE_OBJECTID,       .name_stem = "uuid"     },
        { .id = 0,                              .name_stem = "tree"     },
 };
 
@@ -3415,6 +3416,7 @@ static int write_all_supers(struct btrfs_root *root, int max_mirrors)
        if (total_errors > max_errors) {
                printk(KERN_ERR "btrfs: %d errors while writing supers\n",
                       total_errors);
+               mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
 
                /* FUA is masked off if unsupported and can't be the reason */
                btrfs_error(root->fs_info, -EIO,
index cfb3cf711b34d6555afed3d21dd16480c662cc55..d58bef130a41984ac7e3172aad43eb87547af64b 100644 (file)
@@ -3925,7 +3925,6 @@ static int can_overcommit(struct btrfs_root *root,
        u64 space_size;
        u64 avail;
        u64 used;
-       u64 to_add;
 
        used = space_info->bytes_used + space_info->bytes_reserved +
                space_info->bytes_pinned + space_info->bytes_readonly;
@@ -3959,25 +3958,17 @@ static int can_overcommit(struct btrfs_root *root,
                       BTRFS_BLOCK_GROUP_RAID10))
                avail >>= 1;
 
-       to_add = space_info->total_bytes;
-
        /*
         * If we aren't flushing all things, let us overcommit up to
         * 1/2th of the space. If we can flush, don't let us overcommit
         * too much, let it overcommit up to 1/8 of the space.
         */
        if (flush == BTRFS_RESERVE_FLUSH_ALL)
-               to_add >>= 3;
+               avail >>= 3;
        else
-               to_add >>= 1;
-
-       /*
-        * Limit the overcommit to the amount of free space we could possibly
-        * allocate for chunks.
-        */
-       to_add = min(avail, to_add);
+               avail >>= 1;
 
-       if (used + bytes < space_info->total_bytes + to_add)
+       if (used + bytes < space_info->total_bytes + avail)
                return 1;
        return 0;
 }
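
The can_overcommit() rewrite above drops the intermediate to_add and bases the allowed overcommit purely on the space the still-unallocated devices could provide: one eighth of it when a full flush is possible, one half otherwise. A tiny numeric illustration with made-up sizes (just the arithmetic, not the kernel function):

/* Made-up numbers: 8 GiB metadata space, 7 GiB used, 4 GiB unallocated. */
#include <stdbool.h>
#include <stdio.h>

static bool can_overcommit(unsigned long long used, unsigned long long bytes,
                           unsigned long long total, unsigned long long avail,
                           bool flush_all)
{
        avail >>= flush_all ? 3 : 1;    /* 1/8 with a full flush, else 1/2 */
        return used + bytes < total + avail;
}

int main(void)
{
        unsigned long long total = 8ULL << 30, used = 7ULL << 30,
                           avail = 4ULL << 30, req = 1536ULL << 20;

        printf("flush-all: %s\n",
               can_overcommit(used, req, total, avail, true) ? "ok" : "no");
        printf("no-flush:  %s\n",
               can_overcommit(used, req, total, avail, false) ? "ok" : "no");
        return 0;
}
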
@@ -4000,7 +3991,7 @@ static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
                 */
                btrfs_start_all_delalloc_inodes(root->fs_info, 0);
                if (!current->journal_info)
-                       btrfs_wait_all_ordered_extents(root->fs_info, 0);
+                       btrfs_wait_all_ordered_extents(root->fs_info);
        }
 }
 
@@ -4030,7 +4021,7 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
        if (delalloc_bytes == 0) {
                if (trans)
                        return;
-               btrfs_wait_all_ordered_extents(root->fs_info, 0);
+               btrfs_wait_all_ordered_extents(root->fs_info);
                return;
        }
 
@@ -4058,7 +4049,7 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
 
                loops++;
                if (wait_ordered && !trans) {
-                       btrfs_wait_all_ordered_extents(root->fs_info, 0);
+                       btrfs_wait_all_ordered_extents(root->fs_info);
                } else {
                        time_left = schedule_timeout_killable(1);
                        if (time_left)
@@ -4465,7 +4456,6 @@ static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
                        space_info->bytes_may_use -= num_bytes;
                        trace_btrfs_space_reservation(fs_info, "space_info",
                                        space_info->flags, num_bytes, 0);
-                       space_info->reservation_progress++;
                        spin_unlock(&space_info->lock);
                }
        }
@@ -4666,7 +4656,6 @@ static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
                sinfo->bytes_may_use -= num_bytes;
                trace_btrfs_space_reservation(fs_info, "space_info",
                                      sinfo->flags, num_bytes, 0);
-               sinfo->reservation_progress++;
                block_rsv->reserved = block_rsv->size;
                block_rsv->full = 1;
        }
@@ -5446,7 +5435,6 @@ static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
                        space_info->bytes_readonly += num_bytes;
                cache->reserved -= num_bytes;
                space_info->bytes_reserved -= num_bytes;
-               space_info->reservation_progress++;
        }
        spin_unlock(&cache->lock);
        spin_unlock(&space_info->lock);
@@ -6117,10 +6105,13 @@ enum btrfs_loop_type {
 /*
  * walks the btree of allocated extents and find a hole of a given size.
  * The key ins is changed to record the hole:
- * ins->objectid == block start
+ * ins->objectid == start position
  * ins->flags = BTRFS_EXTENT_ITEM_KEY
- * ins->offset == number of blocks
+ * ins->offset == the size of the hole.
  * Any available blocks before search_start are skipped.
+ *
+ * If there is no suitable free space, the size of the largest free
+ * extent found so far is recorded instead.
  */
 static noinline int find_free_extent(struct btrfs_root *orig_root,
                                     u64 num_bytes, u64 empty_size,
@@ -6133,6 +6124,7 @@ static noinline int find_free_extent(struct btrfs_root *orig_root,
        struct btrfs_block_group_cache *block_group = NULL;
        struct btrfs_block_group_cache *used_block_group;
        u64 search_start = 0;
+       u64 max_extent_size = 0;
        int empty_cluster = 2 * 1024 * 1024;
        struct btrfs_space_info *space_info;
        int loop = 0;
@@ -6292,7 +6284,10 @@ have_block_group:
                                btrfs_get_block_group(used_block_group);
 
                        offset = btrfs_alloc_from_cluster(used_block_group,
-                         last_ptr, num_bytes, used_block_group->key.objectid);
+                                               last_ptr,
+                                               num_bytes,
+                                               used_block_group->key.objectid,
+                                               &max_extent_size);
                        if (offset) {
                                /* we have a block, we're done */
                                spin_unlock(&last_ptr->refill_lock);
@@ -6355,8 +6350,10 @@ refill_cluster:
                                 * cluster
                                 */
                                offset = btrfs_alloc_from_cluster(block_group,
-                                                 last_ptr, num_bytes,
-                                                 search_start);
+                                                       last_ptr,
+                                                       num_bytes,
+                                                       search_start,
+                                                       &max_extent_size);
                                if (offset) {
                                        /* we found one, proceed */
                                        spin_unlock(&last_ptr->refill_lock);
@@ -6391,13 +6388,18 @@ unclustered_alloc:
                if (cached &&
                    block_group->free_space_ctl->free_space <
                    num_bytes + empty_cluster + empty_size) {
+                       if (block_group->free_space_ctl->free_space >
+                           max_extent_size)
+                               max_extent_size =
+                                       block_group->free_space_ctl->free_space;
                        spin_unlock(&block_group->free_space_ctl->tree_lock);
                        goto loop;
                }
                spin_unlock(&block_group->free_space_ctl->tree_lock);
 
                offset = btrfs_find_space_for_alloc(block_group, search_start,
-                                                   num_bytes, empty_size);
+                                                   num_bytes, empty_size,
+                                                   &max_extent_size);
                /*
                 * If we didn't find a chunk, and we haven't failed on this
                 * block group before, and this block group is in the middle of
@@ -6515,7 +6517,8 @@ loop:
                ret = 0;
        }
 out:
-
+       if (ret == -ENOSPC)
+               ins->offset = max_extent_size;
        return ret;
 }
 
@@ -6573,8 +6576,8 @@ again:
                               flags);
 
        if (ret == -ENOSPC) {
-               if (!final_tried) {
-                       num_bytes = num_bytes >> 1;
+               if (!final_tried && ins->offset) {
+                       num_bytes = min(num_bytes >> 1, ins->offset);
                        num_bytes = round_down(num_bytes, root->sectorsize);
                        num_bytes = max(num_bytes, min_alloc_size);
                        if (num_bytes == min_alloc_size)
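
This last extent-tree hunk is where the max_extent_size plumbing pays off: on -ENOSPC, find_free_extent() reports the largest free extent it saw via ins->offset, and the retry halves num_bytes but never asks for more than that, so the loop converges instead of probing sizes that cannot fit. A minimal sketch of that retry shape (try_alloc() is a hypothetical stand-in, not the btrfs allocator):

#include <stdio.h>

#define MIN_ALLOC 4096UL

/*
 * Stand-in allocator: on failure it reports the largest free extent it
 * saw through *max_found, the way the hunk above returns it in
 * ins->offset on -ENOSPC.
 */
static int try_alloc(unsigned long want, unsigned long *max_found)
{
        const unsigned long largest_free = 64 * 1024;   /* pretend state */

        if (want <= largest_free)
                return 0;
        *max_found = largest_free;
        return -1;
}

int main(void)
{
        unsigned long want = 1024 * 1024, max_found = 0;

        while (try_alloc(want, &max_found) != 0) {
                if (!max_found || want == MIN_ALLOC)
                        return 1;       /* genuinely out of space */
                /* clamp the retry to what is known to be available */
                want = want / 2 < max_found ? want / 2 : max_found;
                if (want < MIN_ALLOC)
                        want = MIN_ALLOC;
                printf("retrying with %lu bytes\n", want);
        }
        printf("allocated %lu bytes\n", want);
        return 0;
}
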
index 09582b81640cc8e02ced7ea7c8ec6b22acdc8dba..c09a40db53dbf8dff60ed9c942c217ca6546b058 100644 (file)
@@ -1481,10 +1481,12 @@ static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
                *end = state->end;
                cur_start = state->end + 1;
                node = rb_next(node);
-               if (!node)
-                       break;
                total_bytes += state->end - state->start + 1;
-               if (total_bytes >= max_bytes)
+               if (total_bytes >= max_bytes) {
+                       *end = *start + max_bytes - 1;
+                       break;
+               }
+               if (!node)
                        break;
        }
 out:
index bc5072b2db537f0f27af1851532b7417b41a8489..72da4df53c9a224d7a5106a907fc4ba4e08f5ebb 100644 (file)
@@ -1859,8 +1859,8 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 
        ret = btrfs_log_dentry_safe(trans, root, dentry);
        if (ret < 0) {
-               mutex_unlock(&inode->i_mutex);
-               goto out;
+               /* Fallthrough and commit/free transaction. */
+               ret = 1;
        }
 
        /* we've logged all the items and now have a consistent
index 3f0ddfce96e6ff2bdaaef019ea7ee00f9af070af..b4f9904c4c6b2ed5f0da30ad664baf33100a879c 100644 (file)
@@ -1431,13 +1431,19 @@ static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
        ctl->free_space += bytes;
 }
 
+/*
+ * If no suitable extent can be found, *bytes is used to report the
+ * size of the largest extent that was seen.
+ */
 static int search_bitmap(struct btrfs_free_space_ctl *ctl,
                         struct btrfs_free_space *bitmap_info, u64 *offset,
                         u64 *bytes)
 {
        unsigned long found_bits = 0;
+       unsigned long max_bits = 0;
        unsigned long bits, i;
        unsigned long next_zero;
+       unsigned long extent_bits;
 
        i = offset_to_bit(bitmap_info->offset, ctl->unit,
                          max_t(u64, *offset, bitmap_info->offset));
@@ -1446,9 +1452,12 @@ static int search_bitmap(struct btrfs_free_space_ctl *ctl,
        for_each_set_bit_from(i, bitmap_info->bitmap, BITS_PER_BITMAP) {
                next_zero = find_next_zero_bit(bitmap_info->bitmap,
                                               BITS_PER_BITMAP, i);
-               if ((next_zero - i) >= bits) {
-                       found_bits = next_zero - i;
+               extent_bits = next_zero - i;
+               if (extent_bits >= bits) {
+                       found_bits = extent_bits;
                        break;
+               } else if (extent_bits > max_bits) {
+                       max_bits = extent_bits;
                }
                i = next_zero;
        }
@@ -1459,38 +1468,41 @@ static int search_bitmap(struct btrfs_free_space_ctl *ctl,
                return 0;
        }
 
+       *bytes = (u64)(max_bits) * ctl->unit;
        return -1;
 }
 
+/* Cache the size of the max extent in bytes */
 static struct btrfs_free_space *
 find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
-               unsigned long align)
+               unsigned long align, u64 *max_extent_size)
 {
        struct btrfs_free_space *entry;
        struct rb_node *node;
-       u64 ctl_off;
        u64 tmp;
        u64 align_off;
        int ret;
 
        if (!ctl->free_space_offset.rb_node)
-               return NULL;
+               goto out;
 
        entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1);
        if (!entry)
-               return NULL;
+               goto out;
 
        for (node = &entry->offset_index; node; node = rb_next(node)) {
                entry = rb_entry(node, struct btrfs_free_space, offset_index);
-               if (entry->bytes < *bytes)
+               if (entry->bytes < *bytes) {
+                       if (entry->bytes > *max_extent_size)
+                               *max_extent_size = entry->bytes;
                        continue;
+               }
 
                /* make sure the space returned is big enough
                 * to match our requested alignment
                 */
                if (*bytes >= align) {
-                       ctl_off = entry->offset - ctl->start;
-                       tmp = ctl_off + align - 1;;
+                       tmp = entry->offset - ctl->start + align - 1;
                        do_div(tmp, align);
                        tmp = tmp * align + ctl->start;
                        align_off = tmp - entry->offset;
@@ -1499,14 +1511,22 @@ find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
                        tmp = entry->offset;
                }
 
-               if (entry->bytes < *bytes + align_off)
+               if (entry->bytes < *bytes + align_off) {
+                       if (entry->bytes > *max_extent_size)
+                               *max_extent_size = entry->bytes;
                        continue;
+               }
 
                if (entry->bitmap) {
-                       ret = search_bitmap(ctl, entry, &tmp, bytes);
+                       u64 size = *bytes;
+
+                       ret = search_bitmap(ctl, entry, &tmp, &size);
                        if (!ret) {
                                *offset = tmp;
+                               *bytes = size;
                                return entry;
+                       } else if (size > *max_extent_size) {
+                               *max_extent_size = size;
                        }
                        continue;
                }
@@ -1515,7 +1535,7 @@ find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
                *bytes = entry->bytes - align_off;
                return entry;
        }
-
+out:
        return NULL;
 }
 
@@ -2116,7 +2136,8 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
 }
 
 u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
-                              u64 offset, u64 bytes, u64 empty_size)
+                              u64 offset, u64 bytes, u64 empty_size,
+                              u64 *max_extent_size)
 {
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct btrfs_free_space *entry = NULL;
@@ -2127,7 +2148,7 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
 
        spin_lock(&ctl->tree_lock);
        entry = find_free_space(ctl, &offset, &bytes_search,
-                               block_group->full_stripe_len);
+                               block_group->full_stripe_len, max_extent_size);
        if (!entry)
                goto out;
 
@@ -2137,7 +2158,6 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
                if (!entry->bytes)
                        free_bitmap(ctl, entry);
        } else {
-
                unlink_free_space(ctl, entry);
                align_gap_len = offset - entry->offset;
                align_gap = entry->offset;
@@ -2151,7 +2171,6 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
                else
                        link_free_space(ctl, entry);
        }
-
 out:
        spin_unlock(&ctl->tree_lock);
 
@@ -2206,7 +2225,8 @@ int btrfs_return_cluster_to_free_space(
 static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
                                   struct btrfs_free_cluster *cluster,
                                   struct btrfs_free_space *entry,
-                                  u64 bytes, u64 min_start)
+                                  u64 bytes, u64 min_start,
+                                  u64 *max_extent_size)
 {
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        int err;
@@ -2218,8 +2238,11 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
        search_bytes = bytes;
 
        err = search_bitmap(ctl, entry, &search_start, &search_bytes);
-       if (err)
+       if (err) {
+               if (search_bytes > *max_extent_size)
+                       *max_extent_size = search_bytes;
                return 0;
+       }
 
        ret = search_start;
        __bitmap_clear_bits(ctl, entry, ret, bytes);
@@ -2234,7 +2257,7 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
  */
 u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
                             struct btrfs_free_cluster *cluster, u64 bytes,
-                            u64 min_start)
+                            u64 min_start, u64 *max_extent_size)
 {
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct btrfs_free_space *entry = NULL;
@@ -2254,6 +2277,9 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
 
        entry = rb_entry(node, struct btrfs_free_space, offset_index);
        while(1) {
+               if (entry->bytes < bytes && entry->bytes > *max_extent_size)
+                       *max_extent_size = entry->bytes;
+
                if (entry->bytes < bytes ||
                    (!entry->bitmap && entry->offset < min_start)) {
                        node = rb_next(&entry->offset_index);
@@ -2267,7 +2293,8 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
                if (entry->bitmap) {
                        ret = btrfs_alloc_from_bitmap(block_group,
                                                      cluster, entry, bytes,
-                                                     cluster->window_start);
+                                                     cluster->window_start,
+                                                     max_extent_size);
                        if (ret == 0) {
                                node = rb_next(&entry->offset_index);
                                if (!node)
index c7490416747656e1f3f119c5dc4f91cbf315e2ca..e737f92cf6d0b69ffbcc1e94882abc8693c3e3a5 100644 (file)
@@ -94,7 +94,8 @@ void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl);
 void btrfs_remove_free_space_cache(struct btrfs_block_group_cache
                                     *block_group);
 u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
-                              u64 offset, u64 bytes, u64 empty_size);
+                              u64 offset, u64 bytes, u64 empty_size,
+                              u64 *max_extent_size);
 u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root);
 void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
                           u64 bytes);
@@ -105,7 +106,7 @@ int btrfs_find_space_cluster(struct btrfs_root *root,
 void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster);
 u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
                             struct btrfs_free_cluster *cluster, u64 bytes,
-                            u64 min_start);
+                            u64 min_start, u64 *max_extent_size);
 int btrfs_return_cluster_to_free_space(
                               struct btrfs_block_group_cache *block_group,
                               struct btrfs_free_cluster *cluster);
index f338c5672d583a27dddc37c5e217fa6a8995e271..22ebc13b6c992a0755513253e17f9dd7f20b9706 100644 (file)
@@ -4688,11 +4688,11 @@ static void inode_tree_add(struct inode *inode)
        struct btrfs_inode *entry;
        struct rb_node **p;
        struct rb_node *parent;
+       struct rb_node *new = &BTRFS_I(inode)->rb_node;
        u64 ino = btrfs_ino(inode);
 
        if (inode_unhashed(inode))
                return;
-again:
        parent = NULL;
        spin_lock(&root->inode_lock);
        p = &root->inode_tree.rb_node;
@@ -4707,14 +4707,14 @@ again:
                else {
                        WARN_ON(!(entry->vfs_inode.i_state &
                                  (I_WILL_FREE | I_FREEING)));
-                       rb_erase(parent, &root->inode_tree);
+                       rb_replace_node(parent, new, &root->inode_tree);
                        RB_CLEAR_NODE(parent);
                        spin_unlock(&root->inode_lock);
-                       goto again;
+                       return;
                }
        }
-       rb_link_node(&BTRFS_I(inode)->rb_node, parent, p);
-       rb_insert_color(&BTRFS_I(inode)->rb_node, &root->inode_tree);
+       rb_link_node(new, parent, p);
+       rb_insert_color(new, &root->inode_tree);
        spin_unlock(&root->inode_lock);
 }
 
@@ -8216,6 +8216,10 @@ static int __start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
 
                work = btrfs_alloc_delalloc_work(inode, 0, delay_iput);
                if (unlikely(!work)) {
+                       if (delay_iput)
+                               btrfs_add_delayed_iput(inode);
+                       else
+                               iput(inode);
                        ret = -ENOMEM;
                        goto out;
                }
@@ -8613,11 +8617,13 @@ static const struct inode_operations btrfs_dir_inode_operations = {
        .removexattr    = btrfs_removexattr,
        .permission     = btrfs_permission,
        .get_acl        = btrfs_get_acl,
+       .update_time    = btrfs_update_time,
 };
 static const struct inode_operations btrfs_dir_ro_inode_operations = {
        .lookup         = btrfs_lookup,
        .permission     = btrfs_permission,
        .get_acl        = btrfs_get_acl,
+       .update_time    = btrfs_update_time,
 };
 
 static const struct file_operations btrfs_dir_file_operations = {
index 1a5b9462dd9ae2639513237358c2f55662bf366f..9d46f60cb9439ab3a41ab83f2f323f13aa564590 100644 (file)
@@ -574,7 +574,7 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
        if (ret)
                return ret;
 
-       btrfs_wait_ordered_extents(root, 0);
+       btrfs_wait_ordered_extents(root);
 
        pending_snapshot = kzalloc(sizeof(*pending_snapshot), GFP_NOFS);
        if (!pending_snapshot)
@@ -2696,9 +2696,9 @@ out_unlock:
 static long btrfs_ioctl_file_extent_same(struct file *file,
                                         void __user *argp)
 {
-       struct btrfs_ioctl_same_args *args = argp;
-       struct btrfs_ioctl_same_args same;
-       struct btrfs_ioctl_same_extent_info info;
+       struct btrfs_ioctl_same_args tmp;
+       struct btrfs_ioctl_same_args *same;
+       struct btrfs_ioctl_same_extent_info *info;
        struct inode *src = file->f_dentry->d_inode;
        struct file *dst_file = NULL;
        struct inode *dst;
@@ -2706,6 +2706,7 @@ static long btrfs_ioctl_file_extent_same(struct file *file,
        u64 len;
        int i;
        int ret;
+       unsigned long size;
        u64 bs = BTRFS_I(src)->root->fs_info->sb->s_blocksize;
        bool is_admin = capable(CAP_SYS_ADMIN);
 
@@ -2716,15 +2717,30 @@ static long btrfs_ioctl_file_extent_same(struct file *file,
        if (ret)
                return ret;
 
-       if (copy_from_user(&same,
+       if (copy_from_user(&tmp,
                           (struct btrfs_ioctl_same_args __user *)argp,
-                          sizeof(same))) {
+                          sizeof(tmp))) {
                ret = -EFAULT;
                goto out;
        }
 
-       off = same.logical_offset;
-       len = same.length;
+       size = sizeof(tmp) +
+               tmp.dest_count * sizeof(struct btrfs_ioctl_same_extent_info);
+
+       same = kmalloc(size, GFP_NOFS);
+       if (!same) {
+               ret = -EFAULT;
+               goto out;
+       }
+
+       if (copy_from_user(same,
+                          (struct btrfs_ioctl_same_args __user *)argp, size)) {
+               ret = -EFAULT;
+               goto out;
+       }
+
+       off = same->logical_offset;
+       len = same->length;
 
        /*
         * Limit the total length we will dedupe for each operation.
@@ -2752,27 +2768,28 @@ static long btrfs_ioctl_file_extent_same(struct file *file,
        if (!S_ISREG(src->i_mode))
                goto out;
 
-       ret = 0;
-       for (i = 0; i < same.dest_count; i++) {
-               if (copy_from_user(&info, &args->info[i], sizeof(info))) {
-                       ret = -EFAULT;
-                       goto out;
-               }
+       /* pre-format output fields to sane values */
+       for (i = 0; i < same->dest_count; i++) {
+               same->info[i].bytes_deduped = 0ULL;
+               same->info[i].status = 0;
+       }
 
-               info.bytes_deduped = 0;
+       ret = 0;
+       for (i = 0; i < same->dest_count; i++) {
+               info = &same->info[i];
 
-               dst_file = fget(info.fd);
+               dst_file = fget(info->fd);
                if (!dst_file) {
-                       info.status = -EBADF;
+                       info->status = -EBADF;
                        goto next;
                }
 
                if (!(is_admin || (dst_file->f_mode & FMODE_WRITE))) {
-                       info.status = -EINVAL;
+                       info->status = -EINVAL;
                        goto next;
                }
 
-               info.status = -EXDEV;
+               info->status = -EXDEV;
                if (file->f_path.mnt != dst_file->f_path.mnt)
                        goto next;
 
@@ -2781,32 +2798,29 @@ static long btrfs_ioctl_file_extent_same(struct file *file,
                        goto next;
 
                if (S_ISDIR(dst->i_mode)) {
-                       info.status = -EISDIR;
+                       info->status = -EISDIR;
                        goto next;
                }
 
                if (!S_ISREG(dst->i_mode)) {
-                       info.status = -EACCES;
+                       info->status = -EACCES;
                        goto next;
                }
 
-               info.status = btrfs_extent_same(src, off, len, dst,
-                                               info.logical_offset);
-               if (info.status == 0)
-                       info.bytes_deduped += len;
+               info->status = btrfs_extent_same(src, off, len, dst,
+                                               info->logical_offset);
+               if (info->status == 0)
+                       info->bytes_deduped += len;
 
 next:
                if (dst_file)
                        fput(dst_file);
-
-               if (__put_user_unaligned(info.status, &args->info[i].status) ||
-                   __put_user_unaligned(info.bytes_deduped,
-                                        &args->info[i].bytes_deduped)) {
-                       ret = -EFAULT;
-                       goto out;
-               }                                                               
        }
 
+       ret = copy_to_user(argp, same, size);
+       if (ret)
+               ret = -EFAULT;
+
 out:
        mnt_drop_write_file(file);
        return ret;
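
Rather than re-reading each destination entry from user space and writing results back one field at a time with __put_user_unaligned(), the rewritten EXTENT_SAME ioctl snapshots the fixed header, sizes the whole variable-length argument from dest_count, and copies it in and back out in single calls. A small user-space sketch of that pattern (invented struct names, plain memcpy standing in for copy_from_user()/copy_to_user()):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct same_info {
        long long status;
        unsigned long long bytes_deduped;
};

struct same_args {
        unsigned long long logical_offset;
        unsigned long long length;
        unsigned short dest_count;
        struct same_info info[];        /* flexible array member */
};

static struct same_args *dup_args(const void *user_buf)
{
        struct same_args tmp, *args;
        size_t size;

        /* 1. read only the fixed header to learn dest_count */
        memcpy(&tmp, user_buf, sizeof(tmp));

        /* 2. size the full buffer: header plus dest_count info slots */
        size = sizeof(tmp) + tmp.dest_count * sizeof(struct same_info);
        args = malloc(size);
        if (!args)
                return NULL;

        /* 3. header and info array copied in one go */
        memcpy(args, user_buf, size);
        return args;
}

int main(void)
{
        size_t sz = sizeof(struct same_args) + 2 * sizeof(struct same_info);
        struct same_args *user_buf = calloc(1, sz);
        struct same_args *args;

        if (!user_buf)
                return 1;
        user_buf->length = 4096;
        user_buf->dest_count = 2;

        args = dup_args(user_buf);
        if (!args) {
                free(user_buf);
                return 1;
        }
        printf("dest_count=%hu length=%llu\n", args->dest_count, args->length);
        free(args);
        free(user_buf);
        return 0;
}
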
@@ -3310,7 +3324,7 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
        }
 
        if (!objectid)
-               objectid = root->root_key.objectid;
+               objectid = BTRFS_FS_TREE_OBJECTID;
 
        location.objectid = objectid;
        location.type = BTRFS_ROOT_ITEM_KEY;
index 966b413a33b800d096eda96d662bb3a66770145f..c702cb62f78a310c8d430fe0d98cfb1173479a0f 100644 (file)
@@ -563,11 +563,10 @@ static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
  * wait for all the ordered extents in a root.  This is done when balancing
  * space between drives.
  */
-void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput)
+void btrfs_wait_ordered_extents(struct btrfs_root *root)
 {
        struct list_head splice, works;
        struct btrfs_ordered_extent *ordered, *next;
-       struct inode *inode;
 
        INIT_LIST_HEAD(&splice);
        INIT_LIST_HEAD(&works);
@@ -580,15 +579,6 @@ void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput)
                                           root_extent_list);
                list_move_tail(&ordered->root_extent_list,
                               &root->ordered_extents);
-               /*
-                * the inode may be getting freed (in sys_unlink path).
-                */
-               inode = igrab(ordered->inode);
-               if (!inode) {
-                       cond_resched_lock(&root->ordered_extent_lock);
-                       continue;
-               }
-
                atomic_inc(&ordered->refs);
                spin_unlock(&root->ordered_extent_lock);
 
@@ -605,21 +595,13 @@ void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput)
        list_for_each_entry_safe(ordered, next, &works, work_list) {
                list_del_init(&ordered->work_list);
                wait_for_completion(&ordered->completion);
-
-               inode = ordered->inode;
                btrfs_put_ordered_extent(ordered);
-               if (delay_iput)
-                       btrfs_add_delayed_iput(inode);
-               else
-                       iput(inode);
-
                cond_resched();
        }
        mutex_unlock(&root->fs_info->ordered_operations_mutex);
 }
 
-void btrfs_wait_all_ordered_extents(struct btrfs_fs_info *fs_info,
-                                   int delay_iput)
+void btrfs_wait_all_ordered_extents(struct btrfs_fs_info *fs_info)
 {
        struct btrfs_root *root;
        struct list_head splice;
@@ -637,7 +619,7 @@ void btrfs_wait_all_ordered_extents(struct btrfs_fs_info *fs_info,
                               &fs_info->ordered_roots);
                spin_unlock(&fs_info->ordered_root_lock);
 
-               btrfs_wait_ordered_extents(root, delay_iput);
+               btrfs_wait_ordered_extents(root);
                btrfs_put_fs_root(root);
 
                spin_lock(&fs_info->ordered_root_lock);
index d9a5aa097b4fea86cabf16e649e11775555de260..0c0b35612d7ad1fc5f5c7db5d267e70f319d8529 100644 (file)
@@ -195,9 +195,8 @@ int btrfs_run_ordered_operations(struct btrfs_trans_handle *trans,
 void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root,
                                 struct inode *inode);
-void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput);
-void btrfs_wait_all_ordered_extents(struct btrfs_fs_info *fs_info,
-                                   int delay_iput);
+void btrfs_wait_ordered_extents(struct btrfs_root *root);
+void btrfs_wait_all_ordered_extents(struct btrfs_fs_info *fs_info);
 void btrfs_get_logged_extents(struct btrfs_root *log, struct inode *inode);
 void btrfs_wait_logged_extents(struct btrfs_root *log, u64 transid);
 void btrfs_free_logged_extents(struct btrfs_root *log, u64 transid);
index aacc2121e87c5df2f7dac46e90daca2514a7912f..a5a26320503fd4a82358adff8e614240373ea130 100644 (file)
@@ -1548,7 +1548,7 @@ static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr,
               btrfs_file_extent_other_encoding(leaf, fi));
 
        if (num_bytes != btrfs_file_extent_disk_num_bytes(leaf, fi)) {
-               ret = 1;
+               ret = -EINVAL;
                goto out;
        }
 
@@ -1579,7 +1579,7 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
        u64 end;
        u32 nritems;
        u32 i;
-       int ret;
+       int ret = 0;
        int first = 1;
        int dirty = 0;
 
@@ -1642,11 +1642,13 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
 
                ret = get_new_location(rc->data_inode, &new_bytenr,
                                       bytenr, num_bytes);
-               if (ret > 0) {
-                       WARN_ON(1);
-                       continue;
+               if (ret) {
+                       /*
+                        * Don't have to abort since we've not changed anything
+                        * in the file extent yet.
+                        */
+                       break;
                }
-               BUG_ON(ret < 0);
 
                btrfs_set_file_extent_disk_bytenr(leaf, fi, new_bytenr);
                dirty = 1;
@@ -1656,18 +1658,24 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
                                           num_bytes, parent,
                                           btrfs_header_owner(leaf),
                                           key.objectid, key.offset, 1);
-               BUG_ON(ret);
+               if (ret) {
+                       btrfs_abort_transaction(trans, root, ret);
+                       break;
+               }
 
                ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
                                        parent, btrfs_header_owner(leaf),
                                        key.objectid, key.offset, 1);
-               BUG_ON(ret);
+               if (ret) {
+                       btrfs_abort_transaction(trans, root, ret);
+                       break;
+               }
        }
        if (dirty)
                btrfs_mark_buffer_dirty(leaf);
        if (inode)
                btrfs_add_delayed_iput(inode);
-       return 0;
+       return ret;
 }
 
 static noinline_for_stack
@@ -4238,7 +4246,7 @@ int btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start)
                err = ret;
                goto out;
        }
-       btrfs_wait_all_ordered_extents(fs_info, 0);
+       btrfs_wait_all_ordered_extents(fs_info);
 
        while (1) {
                mutex_lock(&fs_info->cleaner_mutex);
@@ -4499,19 +4507,19 @@ out:
        return ret;
 }
 
-void btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
-                          struct btrfs_root *root, struct extent_buffer *buf,
-                          struct extent_buffer *cow)
+int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
+                         struct btrfs_root *root, struct extent_buffer *buf,
+                         struct extent_buffer *cow)
 {
        struct reloc_control *rc;
        struct backref_node *node;
        int first_cow = 0;
        int level;
-       int ret;
+       int ret = 0;
 
        rc = root->fs_info->reloc_ctl;
        if (!rc)
-               return;
+               return 0;
 
        BUG_ON(rc->stage == UPDATE_DATA_PTRS &&
               root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID);
@@ -4547,10 +4555,9 @@ void btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
                        rc->nodes_relocated += buf->len;
        }
 
-       if (level == 0 && first_cow && rc->stage == UPDATE_DATA_PTRS) {
+       if (level == 0 && first_cow && rc->stage == UPDATE_DATA_PTRS)
                ret = replace_file_extents(trans, rc, root, cow);
-               BUG_ON(ret);
-       }
+       return ret;
 }
 
 /*
index 0afcd452fcb3d62c9804089ef1cb056687917784..a18e0e23f6a6742cd21277702ed6599df84c32c3 100644 (file)
@@ -158,12 +158,20 @@ struct scrub_fixup_nodatasum {
        int                     mirror_num;
 };
 
+struct scrub_nocow_inode {
+       u64                     inum;
+       u64                     offset;
+       u64                     root;
+       struct list_head        list;
+};
+
 struct scrub_copy_nocow_ctx {
        struct scrub_ctx        *sctx;
        u64                     logical;
        u64                     len;
        int                     mirror_num;
        u64                     physical_for_dev_replace;
+       struct list_head        inodes;
        struct btrfs_work       work;
 };
 
@@ -245,7 +253,7 @@ static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
 static int write_page_nocow(struct scrub_ctx *sctx,
                            u64 physical_for_dev_replace, struct page *page);
 static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
-                                     void *ctx);
+                                     struct scrub_copy_nocow_ctx *ctx);
 static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
                            int mirror_num, u64 physical_for_dev_replace);
 static void copy_nocow_pages_worker(struct btrfs_work *work);
@@ -3126,12 +3134,30 @@ static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
        nocow_ctx->mirror_num = mirror_num;
        nocow_ctx->physical_for_dev_replace = physical_for_dev_replace;
        nocow_ctx->work.func = copy_nocow_pages_worker;
+       INIT_LIST_HEAD(&nocow_ctx->inodes);
        btrfs_queue_worker(&fs_info->scrub_nocow_workers,
                           &nocow_ctx->work);
 
        return 0;
 }
 
+static int record_inode_for_nocow(u64 inum, u64 offset, u64 root, void *ctx)
+{
+       struct scrub_copy_nocow_ctx *nocow_ctx = ctx;
+       struct scrub_nocow_inode *nocow_inode;
+
+       nocow_inode = kzalloc(sizeof(*nocow_inode), GFP_NOFS);
+       if (!nocow_inode)
+               return -ENOMEM;
+       nocow_inode->inum = inum;
+       nocow_inode->offset = offset;
+       nocow_inode->root = root;
+       list_add_tail(&nocow_inode->list, &nocow_ctx->inodes);
+       return 0;
+}
+
+#define COPY_COMPLETE 1
+
 static void copy_nocow_pages_worker(struct btrfs_work *work)
 {
        struct scrub_copy_nocow_ctx *nocow_ctx =
@@ -3167,8 +3193,7 @@ static void copy_nocow_pages_worker(struct btrfs_work *work)
        }
 
        ret = iterate_inodes_from_logical(logical, fs_info, path,
-                                         copy_nocow_pages_for_inode,
-                                         nocow_ctx);
+                                         record_inode_for_nocow, nocow_ctx);
        if (ret != 0 && ret != -ENOENT) {
                pr_warn("iterate_inodes_from_logical() failed: log %llu, phys %llu, len %llu, mir %u, ret %d\n",
                        logical, physical_for_dev_replace, len, mirror_num,
@@ -3177,7 +3202,33 @@ static void copy_nocow_pages_worker(struct btrfs_work *work)
                goto out;
        }
 
+       btrfs_end_transaction(trans, root);
+       trans = NULL;
+       while (!list_empty(&nocow_ctx->inodes)) {
+               struct scrub_nocow_inode *entry;
+               entry = list_first_entry(&nocow_ctx->inodes,
+                                        struct scrub_nocow_inode,
+                                        list);
+               list_del_init(&entry->list);
+               ret = copy_nocow_pages_for_inode(entry->inum, entry->offset,
+                                                entry->root, nocow_ctx);
+               kfree(entry);
+               if (ret == COPY_COMPLETE) {
+                       ret = 0;
+                       break;
+               } else if (ret) {
+                       break;
+               }
+       }
 out:
+       while (!list_empty(&nocow_ctx->inodes)) {
+               struct scrub_nocow_inode *entry;
+               entry = list_first_entry(&nocow_ctx->inodes,
+                                        struct scrub_nocow_inode,
+                                        list);
+               list_del_init(&entry->list);
+               kfree(entry);
+       }
        if (trans && !IS_ERR(trans))
                btrfs_end_transaction(trans, root);
        if (not_written)
@@ -3190,20 +3241,25 @@ out:
        scrub_pending_trans_workers_dec(sctx);
 }
 
-static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root, void *ctx)
+static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
+                                     struct scrub_copy_nocow_ctx *nocow_ctx)
 {
-       struct scrub_copy_nocow_ctx *nocow_ctx = ctx;
        struct btrfs_fs_info *fs_info = nocow_ctx->sctx->dev_root->fs_info;
        struct btrfs_key key;
        struct inode *inode;
        struct page *page;
        struct btrfs_root *local_root;
+       struct btrfs_ordered_extent *ordered;
+       struct extent_map *em;
+       struct extent_state *cached_state = NULL;
+       struct extent_io_tree *io_tree;
        u64 physical_for_dev_replace;
-       u64 len;
+       u64 len = nocow_ctx->len;
+       u64 lockstart = offset, lockend = offset + len - 1;
        unsigned long index;
        int srcu_index;
-       int ret;
-       int err;
+       int ret = 0;
+       int err = 0;
 
        key.objectid = root;
        key.type = BTRFS_ROOT_ITEM_KEY;
@@ -3229,9 +3285,33 @@ static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root, void *ctx)
        mutex_lock(&inode->i_mutex);
        inode_dio_wait(inode);
 
-       ret = 0;
        physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
-       len = nocow_ctx->len;
+       io_tree = &BTRFS_I(inode)->io_tree;
+
+       lock_extent_bits(io_tree, lockstart, lockend, 0, &cached_state);
+       ordered = btrfs_lookup_ordered_range(inode, lockstart, len);
+       if (ordered) {
+               btrfs_put_ordered_extent(ordered);
+               goto out_unlock;
+       }
+
+       em = btrfs_get_extent(inode, NULL, 0, lockstart, len, 0);
+       if (IS_ERR(em)) {
+               ret = PTR_ERR(em);
+               goto out_unlock;
+       }
+
+       /*
+        * This extent does not actually cover the logical extent anymore,
+        * move on to the next inode.
+        */
+       if (em->block_start > nocow_ctx->logical ||
+           em->block_start + em->block_len < nocow_ctx->logical + len) {
+               free_extent_map(em);
+               goto out_unlock;
+       }
+       free_extent_map(em);
+
        while (len >= PAGE_CACHE_SIZE) {
                index = offset >> PAGE_CACHE_SHIFT;
 again:
@@ -3247,10 +3327,9 @@ again:
                                goto next_page;
                } else {
                        ClearPageError(page);
-                       err = extent_read_full_page(&BTRFS_I(inode)->
-                                                        io_tree,
-                                                       page, btrfs_get_extent,
-                                                       nocow_ctx->mirror_num);
+                       err = extent_read_full_page_nolock(io_tree, page,
+                                                          btrfs_get_extent,
+                                                          nocow_ctx->mirror_num);
                        if (err) {
                                ret = err;
                                goto next_page;
@@ -3264,6 +3343,7 @@ again:
                         * page in the page cache.
                         */
                        if (page->mapping != inode->i_mapping) {
+                               unlock_page(page);
                                page_cache_release(page);
                                goto again;
                        }
@@ -3287,6 +3367,10 @@ next_page:
                physical_for_dev_replace += PAGE_CACHE_SIZE;
                len -= PAGE_CACHE_SIZE;
        }
+       ret = COPY_COMPLETE;
+out_unlock:
+       unlock_extent_cached(io_tree, lockstart, lockend, &cached_state,
+                            GFP_NOFS);
 out:
        mutex_unlock(&inode->i_mutex);
        iput(inode);
index 3aab10ce63e84812b8e9c5a9250235db9b1bbe86..e913328d0f2adc9c3beb2c37556a479c7615d2fa 100644 (file)
@@ -921,7 +921,7 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
                return 0;
        }
 
-       btrfs_wait_all_ordered_extents(fs_info, 1);
+       btrfs_wait_all_ordered_extents(fs_info);
 
        trans = btrfs_attach_transaction_barrier(root);
        if (IS_ERR(trans)) {
@@ -1340,6 +1340,12 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
                if (ret)
                        goto restore;
        } else {
+               if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) {
+                       btrfs_err(fs_info,
+                               "Remounting read-write after error is not allowed\n");
+                       ret = -EINVAL;
+                       goto restore;
+               }
                if (fs_info->fs_devices->rw_devices == 0) {
                        ret = -EACCES;
                        goto restore;
@@ -1377,6 +1383,16 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
                        pr_warn("btrfs: failed to resume dev_replace\n");
                        goto restore;
                }
+
+               if (!fs_info->uuid_root) {
+                       pr_info("btrfs: creating UUID tree\n");
+                       ret = btrfs_create_uuid_tree(fs_info);
+                       if (ret) {
+                               pr_warn("btrfs: failed to create the uuid tree: "
+                                       "%d\n", ret);
+                               goto restore;
+                       }
+               }
                sb->s_flags &= ~MS_RDONLY;
        }
 out:
@@ -1762,6 +1778,9 @@ static void btrfs_print_info(void)
 #ifdef CONFIG_BTRFS_DEBUG
                        ", debug=on"
 #endif
+#ifdef CONFIG_BTRFS_ASSERT
+                       ", assert=on"
+#endif
 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
                        ", integrity-checker=on"
 #endif
index cac4a3f763230f7e448ca6de68a0859a42b13af8..e7a95356df83787b9d43f009708613ff0051e971 100644 (file)
@@ -1603,7 +1603,7 @@ static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
 static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info)
 {
        if (btrfs_test_opt(fs_info->tree_root, FLUSHONCOMMIT))
-               btrfs_wait_all_ordered_extents(fs_info, 1);
+               btrfs_wait_all_ordered_extents(fs_info);
 }
 
 int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
index 0d9613c3f5e507cc6087decdbc3ef43e3436b396..79f057c0619a5cfe29a6e47dcd3ebeccebce9809 100644 (file)
@@ -93,7 +93,8 @@
  */
 #define LOG_WALK_PIN_ONLY 0
 #define LOG_WALK_REPLAY_INODES 1
-#define LOG_WALK_REPLAY_ALL 2
+#define LOG_WALK_REPLAY_DIR_INDEX 2
+#define LOG_WALK_REPLAY_ALL 3
 
 static int btrfs_log_inode(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root, struct inode *inode,
@@ -393,6 +394,7 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
                if (inode_item) {
                        struct btrfs_inode_item *item;
                        u64 nbytes;
+                       u32 mode;
 
                        item = btrfs_item_ptr(path->nodes[0], path->slots[0],
                                              struct btrfs_inode_item);
@@ -400,9 +402,19 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
                        item = btrfs_item_ptr(eb, slot,
                                              struct btrfs_inode_item);
                        btrfs_set_inode_nbytes(eb, item, nbytes);
+
+                       /*
+                        * If this is a directory we need to reset the i_size to
+                        * 0 so that we can set it up properly when replaying
+                        * the rest of the items in this log.
+                        */
+                       mode = btrfs_inode_mode(eb, item);
+                       if (S_ISDIR(mode))
+                               btrfs_set_inode_size(eb, item, 0);
                }
        } else if (inode_item) {
                struct btrfs_inode_item *item;
+               u32 mode;
 
                /*
                 * New inode, set nbytes to 0 so that the nbytes comes out
@@ -410,6 +422,15 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
                 */
                item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
                btrfs_set_inode_nbytes(eb, item, 0);
+
+               /*
+                * If this is a directory we need to reset the i_size to 0 so
+                * that we can set it up properly when replaying the rest of
+                * the items in this log.
+                */
+               mode = btrfs_inode_mode(eb, item);
+               if (S_ISDIR(mode))
+                       btrfs_set_inode_size(eb, item, 0);
        }
 insert:
        btrfs_release_path(path);
@@ -1496,6 +1517,7 @@ static noinline int insert_one_name(struct btrfs_trans_handle *trans,
                iput(inode);
                return -EIO;
        }
+
        ret = btrfs_add_link(trans, dir, inode, name, name_len, 1, index);
 
        /* FIXME, put inode into FIXUP list */
@@ -1534,6 +1556,7 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
        u8 log_type;
        int exists;
        int ret = 0;
+       bool update_size = (key->type == BTRFS_DIR_INDEX_KEY);
 
        dir = read_one_inode(root, key->objectid);
        if (!dir)
@@ -1604,6 +1627,10 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
                goto insert;
 out:
        btrfs_release_path(path);
+       if (!ret && update_size) {
+               btrfs_i_size_write(dir, dir->i_size + name_len * 2);
+               ret = btrfs_update_inode(trans, root, dir);
+       }
        kfree(name);
        iput(dir);
        return ret;
@@ -1614,6 +1641,7 @@ insert:
                              name, name_len, log_type, &log_key);
        if (ret && ret != -ENOENT)
                goto out;
+       update_size = false;
        ret = 0;
        goto out;
 }
@@ -2027,6 +2055,15 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
                        if (ret)
                                break;
                }
+
+               if (key.type == BTRFS_DIR_INDEX_KEY &&
+                   wc->stage == LOG_WALK_REPLAY_DIR_INDEX) {
+                       ret = replay_one_dir_item(wc->trans, root, path,
+                                                 eb, i, &key);
+                       if (ret)
+                               break;
+               }
+
                if (wc->stage < LOG_WALK_REPLAY_ALL)
                        continue;
 
@@ -2048,8 +2085,7 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
                                                eb, i, &key);
                        if (ret)
                                break;
-               } else if (key.type == BTRFS_DIR_ITEM_KEY ||
-                          key.type == BTRFS_DIR_INDEX_KEY) {
+               } else if (key.type == BTRFS_DIR_ITEM_KEY) {
                        ret = replay_one_dir_item(wc->trans, root, path,
                                                  eb, i, &key);
                        if (ret)
@@ -3805,6 +3841,7 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
        int ret = 0;
        struct btrfs_root *root;
        struct dentry *old_parent = NULL;
+       struct inode *orig_inode = inode;
 
        /*
         * for regular files, if its inode is already on disk, we don't
@@ -3824,7 +3861,14 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
        }
 
        while (1) {
-               BTRFS_I(inode)->logged_trans = trans->transid;
+               /*
+                * If we are logging a directory then we start with our inode,
+                * not our parent's inode, so we need to skip setting the
+                * logged_trans so that further down in the log code we don't
+                * think this inode has already been logged.
+                */
+               if (inode != orig_inode)
+                       BTRFS_I(inode)->logged_trans = trans->transid;
                smp_mb();
 
                if (BTRFS_I(inode)->last_unlink_trans > last_committed) {
index 0052ca8264d9b37cc171e52e0954b165fd6dad46..a106458302238de3e486b125f2cb9308d4e48d48 100644 (file)
@@ -796,7 +796,8 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
                        fs_devices->rotating = 1;
 
                fs_devices->open_devices++;
-               if (device->writeable && !device->is_tgtdev_for_dev_replace) {
+               if (device->writeable &&
+                   device->devid != BTRFS_DEV_REPLACE_DEVID) {
                        fs_devices->rw_devices++;
                        list_add(&device->dev_alloc_list,
                                 &fs_devices->alloc_list);
@@ -911,9 +912,9 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
        if (disk_super->label[0]) {
                if (disk_super->label[BTRFS_LABEL_SIZE - 1])
                        disk_super->label[BTRFS_LABEL_SIZE - 1] = '\0';
-               printk(KERN_INFO "device label %s ", disk_super->label);
+               printk(KERN_INFO "btrfs: device label %s ", disk_super->label);
        } else {
-               printk(KERN_INFO "device fsid %pU ", disk_super->fsid);
+               printk(KERN_INFO "btrfs: device fsid %pU ", disk_super->fsid);
        }
 
        printk(KERN_CONT "devid %llu transid %llu %s\n", devid, transid, path);
index 25badd1aec5c677215f20d9bd970261deab2dd35..f4a08d7fa2f70a58a8513110988cc4928a674bdb 100644 (file)
@@ -56,7 +56,7 @@ void __cachefiles_printk_object(struct cachefiles_object *object,
                       object->fscache.cookie->parent,
                       object->fscache.cookie->netfs_data,
                       object->fscache.cookie->flags);
-               if (keybuf)
+               if (keybuf && cookie->def)
                        keylen = cookie->def->get_key(cookie->netfs_data, keybuf,
                                                      CACHEFILES_KEYBUF_SIZE);
                else
index 34c88b83e39f1214b0ed7f2c22bc58f5acd00fa4..12b0eef84183be5edb35627b169cbe49a516a961 100644 (file)
@@ -162,8 +162,9 @@ int cachefiles_update_object_xattr(struct cachefiles_object *object,
 int cachefiles_check_auxdata(struct cachefiles_object *object)
 {
        struct cachefiles_xattr *auxbuf;
+       enum fscache_checkaux validity;
        struct dentry *dentry = object->dentry;
-       unsigned int dlen;
+       ssize_t xlen;
        int ret;
 
        ASSERT(dentry);
@@ -174,22 +175,22 @@ int cachefiles_check_auxdata(struct cachefiles_object *object)
        if (!auxbuf)
                return -ENOMEM;
 
-       auxbuf->len = vfs_getxattr(dentry, cachefiles_xattr_cache,
-                                  &auxbuf->type, 512 + 1);
-       if (auxbuf->len < 1)
-               return -ESTALE;
-
-       if (auxbuf->type != object->fscache.cookie->def->type)
-               return -ESTALE;
+       xlen = vfs_getxattr(dentry, cachefiles_xattr_cache,
+                           &auxbuf->type, 512 + 1);
+       ret = -ESTALE;
+       if (xlen < 1 ||
+           auxbuf->type != object->fscache.cookie->def->type)
+               goto error;
 
-       dlen = auxbuf->len - 1;
-       ret = fscache_check_aux(&object->fscache, &auxbuf->data, dlen);
+       xlen--;
+       validity = fscache_check_aux(&object->fscache, &auxbuf->data, xlen);
+       if (validity != FSCACHE_CHECKAUX_OKAY)
+               goto error;
 
+       ret = 0;
+error:
        kfree(auxbuf);
-       if (ret != FSCACHE_CHECKAUX_OKAY)
-               return -ESTALE;
-
-       return 0;
+       return ret;
 }
 
 /*
index 854a8f05a61007bf14b2dd75e7f080fbb248cce4..02b0df769e2db23d18d7787e27066ebaa684bbd0 100644 (file)
@@ -1458,7 +1458,7 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
 
        trace_nfs_atomic_open_enter(dir, ctx, open_flags);
        nfs_block_sillyrename(dentry->d_parent);
-       inode = NFS_PROTO(dir)->open_context(dir, ctx, open_flags, &attr);
+       inode = NFS_PROTO(dir)->open_context(dir, ctx, open_flags, &attr, opened);
        nfs_unblock_sillyrename(dentry->d_parent);
        if (IS_ERR(inode)) {
                err = PTR_ERR(inode);
index e5b804dd944c16a8adf4de17ee6588562cec55e8..77efaf15ec9019a2a6ed4527b214a166796497c4 100644 (file)
@@ -19,6 +19,7 @@ nfs4_file_open(struct inode *inode, struct file *filp)
        struct inode *dir;
        unsigned openflags = filp->f_flags;
        struct iattr attr;
+       int opened = 0;
        int err;
 
        /*
@@ -55,7 +56,7 @@ nfs4_file_open(struct inode *inode, struct file *filp)
                nfs_wb_all(inode);
        }
 
-       inode = NFS_PROTO(dir)->open_context(dir, ctx, openflags, &attr);
+       inode = NFS_PROTO(dir)->open_context(dir, ctx, openflags, &attr, &opened);
        if (IS_ERR(inode)) {
                err = PTR_ERR(inode);
                switch (err) {
index 95604f64cab86632d7a166ce588ea9ffd5d87e95..c7c295e556ed87501c069053d0c133e44dcadc97 100644 (file)
@@ -185,6 +185,7 @@ nfs4_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds)
        if (status)
                goto out_put;
 
+       smp_wmb();
        ds->ds_clp = clp;
        dprintk("%s [new] addr: %s\n", __func__, ds->ds_remotestr);
 out:
@@ -801,34 +802,35 @@ nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx)
        struct nfs4_file_layout_dsaddr *dsaddr = FILELAYOUT_LSEG(lseg)->dsaddr;
        struct nfs4_pnfs_ds *ds = dsaddr->ds_list[ds_idx];
        struct nfs4_deviceid_node *devid = FILELAYOUT_DEVID_NODE(lseg);
-
-       if (filelayout_test_devid_unavailable(devid))
-               return NULL;
+       struct nfs4_pnfs_ds *ret = ds;
 
        if (ds == NULL) {
                printk(KERN_ERR "NFS: %s: No data server for offset index %d\n",
                        __func__, ds_idx);
                filelayout_mark_devid_invalid(devid);
-               return NULL;
+               goto out;
        }
+       smp_rmb();
        if (ds->ds_clp)
-               return ds;
+               goto out_test_devid;
 
        if (test_and_set_bit(NFS4DS_CONNECTING, &ds->ds_state) == 0) {
                struct nfs_server *s = NFS_SERVER(lseg->pls_layout->plh_inode);
                int err;
 
                err = nfs4_ds_connect(s, ds);
-               if (err) {
+               if (err)
                        nfs4_mark_deviceid_unavailable(devid);
-                       ds = NULL;
-               }
                nfs4_clear_ds_conn_bit(ds);
        } else {
                /* Either ds is connected, or ds is NULL */
                nfs4_wait_ds_connect(ds);
        }
-       return ds;
+out_test_devid:
+       if (filelayout_test_devid_unavailable(devid))
+               ret = NULL;
+out:
+       return ret;
 }
 
 module_param(dataserver_retrans, uint, 0644);
index 989bb9d3074d0d4c88d63a486f05491423b090d4..d53d6785cba27f5c6442831e51eded9d04b054f3 100644 (file)
@@ -912,6 +912,7 @@ struct nfs4_opendata {
        struct iattr attrs;
        unsigned long timestamp;
        unsigned int rpc_done : 1;
+       unsigned int file_created : 1;
        unsigned int is_recover : 1;
        int rpc_status;
        int cancelled;
@@ -1946,8 +1947,13 @@ static int _nfs4_proc_open(struct nfs4_opendata *data)
 
        nfs_fattr_map_and_free_names(server, &data->f_attr);
 
-       if (o_arg->open_flags & O_CREAT)
+       if (o_arg->open_flags & O_CREAT) {
                update_changeattr(dir, &o_res->cinfo);
+               if (o_arg->open_flags & O_EXCL)
+                       data->file_created = 1;
+               else if (o_res->cinfo.before != o_res->cinfo.after)
+                       data->file_created = 1;
+       }
        if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
                server->caps &= ~NFS_CAP_POSIX_LOCK;
        if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
@@ -2191,7 +2197,8 @@ static int _nfs4_do_open(struct inode *dir,
                        struct nfs_open_context *ctx,
                        int flags,
                        struct iattr *sattr,
-                       struct nfs4_label *label)
+                       struct nfs4_label *label,
+                       int *opened)
 {
        struct nfs4_state_owner  *sp;
        struct nfs4_state     *state = NULL;
@@ -2261,6 +2268,8 @@ static int _nfs4_do_open(struct inode *dir,
                        nfs_setsecurity(state->inode, opendata->o_res.f_attr, olabel);
                }
        }
+       if (opendata->file_created)
+               *opened |= FILE_CREATED;
 
        if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server))
                *ctx_th = opendata->f_attr.mdsthreshold;
@@ -2289,7 +2298,8 @@ static struct nfs4_state *nfs4_do_open(struct inode *dir,
                                        struct nfs_open_context *ctx,
                                        int flags,
                                        struct iattr *sattr,
-                                       struct nfs4_label *label)
+                                       struct nfs4_label *label,
+                                       int *opened)
 {
        struct nfs_server *server = NFS_SERVER(dir);
        struct nfs4_exception exception = { };
@@ -2297,7 +2307,7 @@ static struct nfs4_state *nfs4_do_open(struct inode *dir,
        int status;
 
        do {
-               status = _nfs4_do_open(dir, ctx, flags, sattr, label);
+               status = _nfs4_do_open(dir, ctx, flags, sattr, label, opened);
                res = ctx->state;
                trace_nfs4_open_file(ctx, flags, status);
                if (status == 0)
@@ -2659,7 +2669,8 @@ out:
 }
 
 static struct inode *
-nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, int open_flags, struct iattr *attr)
+nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx,
+               int open_flags, struct iattr *attr, int *opened)
 {
        struct nfs4_state *state;
        struct nfs4_label l = {0, 0, 0, NULL}, *label = NULL;
@@ -2667,7 +2678,7 @@ nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, int open_flags
        label = nfs4_label_init_security(dir, ctx->dentry, attr, &l);
 
        /* Protect against concurrent sillydeletes */
-       state = nfs4_do_open(dir, ctx, open_flags, attr, label);
+       state = nfs4_do_open(dir, ctx, open_flags, attr, label, opened);
 
        nfs4_label_release_security(label);
 
@@ -3332,6 +3343,7 @@ nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
        struct nfs4_label l, *ilabel = NULL;
        struct nfs_open_context *ctx;
        struct nfs4_state *state;
+       int opened = 0;
        int status = 0;
 
        ctx = alloc_nfs_open_context(dentry, FMODE_READ);
@@ -3341,7 +3353,7 @@ nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
        ilabel = nfs4_label_init_security(dir, dentry, sattr, &l);
 
        sattr->ia_mode &= ~current_umask();
-       state = nfs4_do_open(dir, ctx, flags, sattr, ilabel);
+       state = nfs4_do_open(dir, ctx, flags, sattr, ilabel, &opened);
        if (IS_ERR(state)) {
                status = PTR_ERR(state);
                goto out;
@@ -7564,8 +7576,10 @@ nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
 {
        int err;
        struct page *page;
-       rpc_authflavor_t flavor;
+       rpc_authflavor_t flavor = RPC_AUTH_MAXFLAVOR;
        struct nfs4_secinfo_flavors *flavors;
+       struct nfs4_secinfo4 *secinfo;
+       int i;
 
        page = alloc_page(GFP_KERNEL);
        if (!page) {
@@ -7587,9 +7601,31 @@ nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
        if (err)
                goto out_freepage;
 
-       flavor = nfs_find_best_sec(flavors);
-       if (err == 0)
-               err = nfs4_lookup_root_sec(server, fhandle, info, flavor);
+       for (i = 0; i < flavors->num_flavors; i++) {
+               secinfo = &flavors->flavors[i];
+
+               switch (secinfo->flavor) {
+               case RPC_AUTH_NULL:
+               case RPC_AUTH_UNIX:
+               case RPC_AUTH_GSS:
+                       flavor = rpcauth_get_pseudoflavor(secinfo->flavor,
+                                       &secinfo->flavor_info);
+                       break;
+               default:
+                       flavor = RPC_AUTH_MAXFLAVOR;
+                       break;
+               }
+
+               if (flavor != RPC_AUTH_MAXFLAVOR) {
+                       err = nfs4_lookup_root_sec(server, fhandle,
+                                                  info, flavor);
+                       if (!err)
+                               break;
+               }
+       }
+
+       if (flavor == RPC_AUTH_MAXFLAVOR)
+               err = -EPERM;
 
 out_freepage:
        put_page(page);
index 0ba679866e504ec126720f3eb7b78e210703f538..da276640f7763d2463c0fa99cb832a86d243f10c 100644 (file)
@@ -94,6 +94,7 @@ void nilfs_forget_buffer(struct buffer_head *bh)
        clear_buffer_nilfs_volatile(bh);
        clear_buffer_nilfs_checked(bh);
        clear_buffer_nilfs_redirected(bh);
+       clear_buffer_async_write(bh);
        clear_buffer_dirty(bh);
        if (nilfs_page_buffers_clean(page))
                __nilfs_clear_page_dirty(page);
@@ -429,6 +430,7 @@ void nilfs_clear_dirty_page(struct page *page, bool silent)
                                        "discard block %llu, size %zu",
                                        (u64)bh->b_blocknr, bh->b_size);
                        }
+                       clear_buffer_async_write(bh);
                        clear_buffer_dirty(bh);
                        clear_buffer_nilfs_volatile(bh);
                        clear_buffer_nilfs_checked(bh);
index bd88a7461063bba02f31c6f873902c9c8e87e943..9f6b486b6c01a0e6711ca5930e78474d0739e35e 100644 (file)
@@ -665,7 +665,7 @@ static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
 
                bh = head = page_buffers(page);
                do {
-                       if (!buffer_dirty(bh))
+                       if (!buffer_dirty(bh) || buffer_async_write(bh))
                                continue;
                        get_bh(bh);
                        list_add_tail(&bh->b_assoc_buffers, listp);
@@ -699,7 +699,8 @@ static void nilfs_lookup_dirty_node_buffers(struct inode *inode,
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        bh = head = page_buffers(pvec.pages[i]);
                        do {
-                               if (buffer_dirty(bh)) {
+                               if (buffer_dirty(bh) &&
+                                               !buffer_async_write(bh)) {
                                        get_bh(bh);
                                        list_add_tail(&bh->b_assoc_buffers,
                                                      listp);
@@ -1579,6 +1580,7 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
 
                list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
                                    b_assoc_buffers) {
+                       set_buffer_async_write(bh);
                        if (bh->b_page != bd_page) {
                                if (bd_page) {
                                        lock_page(bd_page);
@@ -1592,6 +1594,7 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
 
                list_for_each_entry(bh, &segbuf->sb_payload_buffers,
                                    b_assoc_buffers) {
+                       set_buffer_async_write(bh);
                        if (bh == segbuf->sb_super_root) {
                                if (bh->b_page != bd_page) {
                                        lock_page(bd_page);
@@ -1677,6 +1680,7 @@ static void nilfs_abort_logs(struct list_head *logs, int err)
        list_for_each_entry(segbuf, logs, sb_list) {
                list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
                                    b_assoc_buffers) {
+                       clear_buffer_async_write(bh);
                        if (bh->b_page != bd_page) {
                                if (bd_page)
                                        end_page_writeback(bd_page);
@@ -1686,6 +1690,7 @@ static void nilfs_abort_logs(struct list_head *logs, int err)
 
                list_for_each_entry(bh, &segbuf->sb_payload_buffers,
                                    b_assoc_buffers) {
+                       clear_buffer_async_write(bh);
                        if (bh == segbuf->sb_super_root) {
                                if (bh->b_page != bd_page) {
                                        end_page_writeback(bd_page);
@@ -1755,6 +1760,7 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
                                    b_assoc_buffers) {
                        set_buffer_uptodate(bh);
                        clear_buffer_dirty(bh);
+                       clear_buffer_async_write(bh);
                        if (bh->b_page != bd_page) {
                                if (bd_page)
                                        end_page_writeback(bd_page);
@@ -1776,6 +1782,7 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
                                    b_assoc_buffers) {
                        set_buffer_uptodate(bh);
                        clear_buffer_dirty(bh);
+                       clear_buffer_async_write(bh);
                        clear_buffer_delay(bh);
                        clear_buffer_nilfs_volatile(bh);
                        clear_buffer_nilfs_redirected(bh);
index ef999729e274ead1ed88c699ee86b8011fd5ff77..0d3a97d2d5f659caeb60f20f448ce25371036ff2 100644 (file)
@@ -70,9 +70,10 @@ static int ocfs2_dentry_revalidate(struct dentry *dentry, unsigned int flags)
         */
        if (inode == NULL) {
                unsigned long gen = (unsigned long) dentry->d_fsdata;
-               unsigned long pgen =
-                       OCFS2_I(dentry->d_parent->d_inode)->ip_dir_lock_gen;
-
+               unsigned long pgen;
+               spin_lock(&dentry->d_lock);
+               pgen = OCFS2_I(dentry->d_parent->d_inode)->ip_dir_lock_gen;
+               spin_unlock(&dentry->d_lock);
                trace_ocfs2_dentry_revalidate_negative(dentry->d_name.len,
                                                       dentry->d_name.name,
                                                       pgen, gen);
index 121da2dc3be841e579dd64fdea6bcfb89f5c121c..d4e81e4a9b0489de2eb66899eb23b9d87ea81ae5 100644 (file)
@@ -1924,7 +1924,7 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
 {
        int tmp, hangup_needed = 0;
        struct ocfs2_super *osb = NULL;
-       char nodestr[8];
+       char nodestr[12];
 
        trace_ocfs2_dismount_volume(sb);
 
index 73feacc49b2ef3bc2b088f7d74ed05d4195a45ac..fd777032c2ba7551dd10fbf9572289b36d09ad27 100644 (file)
@@ -1163,21 +1163,6 @@ static struct reiserfs_journal_list *find_newer_jl_for_cn(struct
        return NULL;
 }
 
-static int newer_jl_done(struct reiserfs_journal_cnode *cn)
-{
-       struct super_block *sb = cn->sb;
-       b_blocknr_t blocknr = cn->blocknr;
-
-       cn = cn->hprev;
-       while (cn) {
-               if (cn->sb == sb && cn->blocknr == blocknr && cn->jlist &&
-                   atomic_read(&cn->jlist->j_commit_left) != 0)
-                                   return 0;
-               cn = cn->hprev;
-       }
-       return 1;
-}
-
 static void remove_journal_hash(struct super_block *,
                                struct reiserfs_journal_cnode **,
                                struct reiserfs_journal_list *, unsigned long,
@@ -1353,7 +1338,6 @@ static int flush_journal_list(struct super_block *s,
                reiserfs_warning(s, "clm-2048", "called with wcount %d",
                                 atomic_read(&journal->j_wcount));
        }
-       BUG_ON(jl->j_trans_id == 0);
 
        /* if flushall == 0, the lock is already held */
        if (flushall) {
@@ -1593,31 +1577,6 @@ static int flush_journal_list(struct super_block *s,
        return err;
 }
 
-static int test_transaction(struct super_block *s,
-                            struct reiserfs_journal_list *jl)
-{
-       struct reiserfs_journal_cnode *cn;
-
-       if (jl->j_len == 0 || atomic_read(&jl->j_nonzerolen) == 0)
-               return 1;
-
-       cn = jl->j_realblock;
-       while (cn) {
-               /* if the blocknr == 0, this has been cleared from the hash,
-                ** skip it
-                */
-               if (cn->blocknr == 0) {
-                       goto next;
-               }
-               if (cn->bh && !newer_jl_done(cn))
-                       return 0;
-             next:
-               cn = cn->next;
-               cond_resched();
-       }
-       return 0;
-}
-
 static int write_one_transaction(struct super_block *s,
                                 struct reiserfs_journal_list *jl,
                                 struct buffer_chunk *chunk)
@@ -1805,6 +1764,8 @@ static int flush_used_journal_lists(struct super_block *s,
                        break;
                tjl = JOURNAL_LIST_ENTRY(tjl->j_list.next);
        }
+       get_journal_list(jl);
+       get_journal_list(flush_jl);
        /* try to find a group of blocks we can flush across all the
         ** transactions, but only bother if we've actually spanned
         ** across multiple lists
@@ -1813,6 +1774,8 @@ static int flush_used_journal_lists(struct super_block *s,
                ret = kupdate_transactions(s, jl, &tjl, &trans_id, len, i);
        }
        flush_journal_list(s, flush_jl, 1);
+       put_journal_list(s, flush_jl);
+       put_journal_list(s, jl);
        return 0;
 }
 
@@ -3868,27 +3831,6 @@ int reiserfs_prepare_for_journal(struct super_block *sb,
        return 1;
 }
 
-static void flush_old_journal_lists(struct super_block *s)
-{
-       struct reiserfs_journal *journal = SB_JOURNAL(s);
-       struct reiserfs_journal_list *jl;
-       struct list_head *entry;
-       time_t now = get_seconds();
-
-       while (!list_empty(&journal->j_journal_list)) {
-               entry = journal->j_journal_list.next;
-               jl = JOURNAL_LIST_ENTRY(entry);
-               /* this check should always be run, to send old lists to disk */
-               if (jl->j_timestamp < (now - (JOURNAL_MAX_TRANS_AGE * 4)) &&
-                   atomic_read(&jl->j_commit_left) == 0 &&
-                   test_transaction(s, jl)) {
-                       flush_used_journal_lists(s, jl);
-               } else {
-                       break;
-               }
-       }
-}
-
 /*
 ** long and ugly.  If flush, will not return until all commit
 ** blocks and all real buffers in the trans are on disk.
@@ -4232,7 +4174,6 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
                        }
                }
        }
-       flush_old_journal_lists(sb);
 
        journal->j_current_jl->j_list_bitmap =
            get_list_bitmap(sb, journal->j_current_jl);
index 3a96c9783a8b959015af96e0b0548bd8e51cc606..0225c20f877047abb0a47a856c6099594964b6aa 100644 (file)
@@ -264,6 +264,8 @@ out_free_sb:
  */
 static inline void destroy_super(struct super_block *s)
 {
+       list_lru_destroy(&s->s_dentry_lru);
+       list_lru_destroy(&s->s_inode_lru);
 #ifdef CONFIG_SMP
        free_percpu(s->s_files);
 #endif
@@ -323,8 +325,6 @@ void deactivate_locked_super(struct super_block *s)
 
                /* caches are now gone, we can safely kill the shrinker now */
                unregister_shrinker(&s->s_shrink);
-               list_lru_destroy(&s->s_dentry_lru);
-               list_lru_destroy(&s->s_inode_lru);
 
                put_filesystem(fs);
                put_super(s);
index d0c6a007ce835cf869fac695eb5445b34be6d814..eda10959714f2acad6ce5d9be82fffbe9426513b 100644 (file)
@@ -487,6 +487,7 @@ static int v7_fill_super(struct super_block *sb, void *data, int silent)
        sbi->s_sb = sb;
        sbi->s_block_base = 0;
        sbi->s_type = FSTYPE_V7;
+       mutex_init(&sbi->s_lock);
        sb->s_fs_info = sbi;
        
        sb_set_blocksize(sb, 512);
index 7e5aae4bf46fd1c1e65da56615238ffe4944fd3b..6eaf5edf1ea1577e88cafc60184963e1b18df5a5 100644 (file)
@@ -30,18 +30,17 @@ void udf_free_inode(struct inode *inode)
 {
        struct super_block *sb = inode->i_sb;
        struct udf_sb_info *sbi = UDF_SB(sb);
+       struct logicalVolIntegrityDescImpUse *lvidiu = udf_sb_lvidiu(sb);
 
-       mutex_lock(&sbi->s_alloc_mutex);
-       if (sbi->s_lvid_bh) {
-               struct logicalVolIntegrityDescImpUse *lvidiu =
-                                                       udf_sb_lvidiu(sbi);
+       if (lvidiu) {
+               mutex_lock(&sbi->s_alloc_mutex);
                if (S_ISDIR(inode->i_mode))
                        le32_add_cpu(&lvidiu->numDirs, -1);
                else
                        le32_add_cpu(&lvidiu->numFiles, -1);
                udf_updated_lvid(sb);
+               mutex_unlock(&sbi->s_alloc_mutex);
        }
-       mutex_unlock(&sbi->s_alloc_mutex);
 
        udf_free_blocks(sb, NULL, &UDF_I(inode)->i_location, 0, 1);
 }
@@ -55,6 +54,7 @@ struct inode *udf_new_inode(struct inode *dir, umode_t mode, int *err)
        uint32_t start = UDF_I(dir)->i_location.logicalBlockNum;
        struct udf_inode_info *iinfo;
        struct udf_inode_info *dinfo = UDF_I(dir);
+       struct logicalVolIntegrityDescImpUse *lvidiu;
 
        inode = new_inode(sb);
 
@@ -92,12 +92,10 @@ struct inode *udf_new_inode(struct inode *dir, umode_t mode, int *err)
                return NULL;
        }
 
-       if (sbi->s_lvid_bh) {
-               struct logicalVolIntegrityDescImpUse *lvidiu;
-
+       lvidiu = udf_sb_lvidiu(sb);
+       if (lvidiu) {
                iinfo->i_unique = lvid_get_unique_id(sb);
                mutex_lock(&sbi->s_alloc_mutex);
-               lvidiu = udf_sb_lvidiu(sbi);
                if (S_ISDIR(mode))
                        le32_add_cpu(&lvidiu->numDirs, 1);
                else
index 839a2bad7f45b693db4ed478598b997c42077712..91219385691d8f80d1db9aed3973183bb931a48d 100644 (file)
@@ -94,13 +94,25 @@ static unsigned int udf_count_free(struct super_block *);
 static int udf_statfs(struct dentry *, struct kstatfs *);
 static int udf_show_options(struct seq_file *, struct dentry *);
 
-struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct udf_sb_info *sbi)
+struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct super_block *sb)
 {
-       struct logicalVolIntegrityDesc *lvid =
-               (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
-       __u32 number_of_partitions = le32_to_cpu(lvid->numOfPartitions);
-       __u32 offset = number_of_partitions * 2 *
-                               sizeof(uint32_t)/sizeof(uint8_t);
+       struct logicalVolIntegrityDesc *lvid;
+       unsigned int partnum;
+       unsigned int offset;
+
+       if (!UDF_SB(sb)->s_lvid_bh)
+               return NULL;
+       lvid = (struct logicalVolIntegrityDesc *)UDF_SB(sb)->s_lvid_bh->b_data;
+       partnum = le32_to_cpu(lvid->numOfPartitions);
+       if ((sb->s_blocksize - sizeof(struct logicalVolIntegrityDescImpUse) -
+            offsetof(struct logicalVolIntegrityDesc, impUse)) /
+            (2 * sizeof(uint32_t)) < partnum) {
+               udf_err(sb, "Logical volume integrity descriptor corrupted "
+                       "(numOfPartitions = %u)!\n", partnum);
+               return NULL;
+       }
+       /* The offset is to skip freeSpaceTable and sizeTable arrays */
+       offset = partnum * 2 * sizeof(uint32_t);
        return (struct logicalVolIntegrityDescImpUse *)&(lvid->impUse[offset]);
 }
 
@@ -629,9 +641,10 @@ static int udf_remount_fs(struct super_block *sb, int *flags, char *options)
        struct udf_options uopt;
        struct udf_sb_info *sbi = UDF_SB(sb);
        int error = 0;
+       struct logicalVolIntegrityDescImpUse *lvidiu = udf_sb_lvidiu(sb);
 
-       if (sbi->s_lvid_bh) {
-               int write_rev = le16_to_cpu(udf_sb_lvidiu(sbi)->minUDFWriteRev);
+       if (lvidiu) {
+               int write_rev = le16_to_cpu(lvidiu->minUDFWriteRev);
                if (write_rev > UDF_MAX_WRITE_VERSION && !(*flags & MS_RDONLY))
                        return -EACCES;
        }
@@ -1905,11 +1918,12 @@ static void udf_open_lvid(struct super_block *sb)
 
        if (!bh)
                return;
-
-       mutex_lock(&sbi->s_alloc_mutex);
        lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
-       lvidiu = udf_sb_lvidiu(sbi);
+       lvidiu = udf_sb_lvidiu(sb);
+       if (!lvidiu)
+               return;
 
+       mutex_lock(&sbi->s_alloc_mutex);
        lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
        lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
        udf_time_to_disk_stamp(&lvid->recordingDateAndTime,
@@ -1937,10 +1951,12 @@ static void udf_close_lvid(struct super_block *sb)
 
        if (!bh)
                return;
+       lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
+       lvidiu = udf_sb_lvidiu(sb);
+       if (!lvidiu)
+               return;
 
        mutex_lock(&sbi->s_alloc_mutex);
-       lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
-       lvidiu = udf_sb_lvidiu(sbi);
        lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
        lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
        udf_time_to_disk_stamp(&lvid->recordingDateAndTime, CURRENT_TIME);
@@ -2093,15 +2109,19 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
 
        if (sbi->s_lvid_bh) {
                struct logicalVolIntegrityDescImpUse *lvidiu =
-                                                       udf_sb_lvidiu(sbi);
-               uint16_t minUDFReadRev = le16_to_cpu(lvidiu->minUDFReadRev);
-               uint16_t minUDFWriteRev = le16_to_cpu(lvidiu->minUDFWriteRev);
-               /* uint16_t maxUDFWriteRev =
-                               le16_to_cpu(lvidiu->maxUDFWriteRev); */
+                                                       udf_sb_lvidiu(sb);
+               uint16_t minUDFReadRev;
+               uint16_t minUDFWriteRev;
 
+               if (!lvidiu) {
+                       ret = -EINVAL;
+                       goto error_out;
+               }
+               minUDFReadRev = le16_to_cpu(lvidiu->minUDFReadRev);
+               minUDFWriteRev = le16_to_cpu(lvidiu->minUDFWriteRev);
                if (minUDFReadRev > UDF_MAX_READ_VERSION) {
                        udf_err(sb, "minUDFReadRev=%x (max is %x)\n",
-                               le16_to_cpu(lvidiu->minUDFReadRev),
+                               minUDFReadRev,
                                UDF_MAX_READ_VERSION);
                        ret = -EINVAL;
                        goto error_out;
@@ -2265,11 +2285,7 @@ static int udf_statfs(struct dentry *dentry, struct kstatfs *buf)
        struct logicalVolIntegrityDescImpUse *lvidiu;
        u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
 
-       if (sbi->s_lvid_bh != NULL)
-               lvidiu = udf_sb_lvidiu(sbi);
-       else
-               lvidiu = NULL;
-
+       lvidiu = udf_sb_lvidiu(sb);
        buf->f_type = UDF_SUPER_MAGIC;
        buf->f_bsize = sb->s_blocksize;
        buf->f_blocks = sbi->s_partmaps[sbi->s_partition].s_partition_len;
index ed401e94aa8c956dd8685ab33f496fc4ea52eec7..1f32c7bd9f57f21fb2859413cacde08b2949fd3a 100644 (file)
@@ -162,7 +162,7 @@ static inline struct udf_sb_info *UDF_SB(struct super_block *sb)
        return sb->s_fs_info;
 }
 
-struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct udf_sb_info *sbi);
+struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct super_block *sb);
 
 int udf_compute_nr_groups(struct super_block *sb, u32 partition);
 
index 88c5ea75ebf66abd175bdf2d71898380f2aca9a8..f1d85cfc0a54d1cb54ebe95937872127f2e63142 100644 (file)
@@ -628,6 +628,7 @@ xfs_buf_item_unlock(
                else if (aborted) {
                        ASSERT(XFS_FORCED_SHUTDOWN(lip->li_mountp));
                        if (lip->li_flags & XFS_LI_IN_AIL) {
+                               spin_lock(&lip->li_ailp->xa_lock);
                                xfs_trans_ail_delete(lip->li_ailp, lip,
                                                     SHUTDOWN_LOG_IO_ERROR);
                        }
index 069537c845e5cc424ce02bf3cec7838250357a32..20bf8e8002d6fd2733782af97f373ddaf2bcd8d6 100644 (file)
@@ -1224,6 +1224,7 @@ xfs_da3_node_toosmall(
        /* start with smaller blk num */
        forward = nodehdr.forw < nodehdr.back;
        for (i = 0; i < 2; forward = !forward, i++) {
+               struct xfs_da3_icnode_hdr thdr;
                if (forward)
                        blkno = nodehdr.forw;
                else
@@ -1236,10 +1237,10 @@ xfs_da3_node_toosmall(
                        return(error);
 
                node = bp->b_addr;
-               xfs_da3_node_hdr_from_disk(&nodehdr, node);
+               xfs_da3_node_hdr_from_disk(&thdr, node);
                xfs_trans_brelse(state->args->trans, bp);
 
-               if (count - nodehdr.count >= 0)
+               if (count - thdr.count >= 0)
                        break;  /* fits with at least 25% to spare */
        }
        if (i >= 2) {
index 1edb5cc3e5f495fdca3059054d1f2e7bd5d9fd58..18272c766a508ab53deb5465448ca26bf02110d6 100644 (file)
@@ -515,7 +515,7 @@ typedef struct xfs_swapext
 /*     XFS_IOC_GETBIOSIZE ---- deprecated 47      */
 #define XFS_IOC_GETBMAPX       _IOWR('X', 56, struct getbmap)
 #define XFS_IOC_ZERO_RANGE     _IOW ('X', 57, struct xfs_flock64)
-#define XFS_IOC_FREE_EOFBLOCKS _IOR ('X', 58, struct xfs_eofblocks)
+#define XFS_IOC_FREE_EOFBLOCKS _IOR ('X', 58, struct xfs_fs_eofblocks)
 
 /*
  * ioctl commands that replace IRIX syssgi()'s
index 193206ba43582c0aecbea7afd1d23220bfff554f..474807a401c864e7681d3f8d0f111358c45536fc 100644 (file)
@@ -119,11 +119,6 @@ xfs_inode_free(
                ip->i_itemp = NULL;
        }
 
-       /* asserts to verify all state is correct here */
-       ASSERT(atomic_read(&ip->i_pincount) == 0);
-       ASSERT(!spin_is_locked(&ip->i_flags_lock));
-       ASSERT(!xfs_isiflocked(ip));
-
        /*
         * Because we use RCU freeing we need to ensure the inode always
         * appears to be reclaimed with an invalid inode number when in the
@@ -135,6 +130,10 @@ xfs_inode_free(
        ip->i_ino = 0;
        spin_unlock(&ip->i_flags_lock);
 
+       /* asserts to verify all state is correct here */
+       ASSERT(atomic_read(&ip->i_pincount) == 0);
+       ASSERT(!xfs_isiflocked(ip));
+
        call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
 }
 
index dabda9521b4becc2ded846307aa062a093a7a3d3..cc179878fe41f0f2a19e961b03a955a56c6e4a16 100644 (file)
@@ -1970,6 +1970,13 @@ xlog_recover_do_inode_buffer(
  * magic number.  If we don't recognise the magic number in the buffer, then
  * return a LSN of -1 so that the caller knows it was an unrecognised block and
  * so can recover the buffer.
+ *
+ * Note: we cannot rely solely on magic number matches to determine that the
+ * buffer has a valid LSN - we also need to verify that it belongs to this
+ * filesystem, so we need to extract the object's UUID and compare it to the
+ * one we read from the superblock. If the UUIDs don't match, then we've got a
+ * stale metadata block from an old filesystem instance that we need to recover
+ * over the top of.
  */
 static xfs_lsn_t
 xlog_recover_get_buf_lsn(
@@ -1980,6 +1987,8 @@ xlog_recover_get_buf_lsn(
        __uint16_t              magic16;
        __uint16_t              magicda;
        void                    *blk = bp->b_addr;
+       uuid_t                  *uuid;
+       xfs_lsn_t               lsn = -1;
 
        /* v4 filesystems always recover immediately */
        if (!xfs_sb_version_hascrc(&mp->m_sb))
@@ -1992,43 +2001,79 @@ xlog_recover_get_buf_lsn(
        case XFS_ABTB_MAGIC:
        case XFS_ABTC_MAGIC:
        case XFS_IBT_CRC_MAGIC:
-       case XFS_IBT_MAGIC:
-               return be64_to_cpu(
-                               ((struct xfs_btree_block *)blk)->bb_u.s.bb_lsn);
+       case XFS_IBT_MAGIC: {
+               struct xfs_btree_block *btb = blk;
+
+               lsn = be64_to_cpu(btb->bb_u.s.bb_lsn);
+               uuid = &btb->bb_u.s.bb_uuid;
+               break;
+       }
        case XFS_BMAP_CRC_MAGIC:
-       case XFS_BMAP_MAGIC:
-               return be64_to_cpu(
-                               ((struct xfs_btree_block *)blk)->bb_u.l.bb_lsn);
+       case XFS_BMAP_MAGIC: {
+               struct xfs_btree_block *btb = blk;
+
+               lsn = be64_to_cpu(btb->bb_u.l.bb_lsn);
+               uuid = &btb->bb_u.l.bb_uuid;
+               break;
+       }
        case XFS_AGF_MAGIC:
-               return be64_to_cpu(((struct xfs_agf *)blk)->agf_lsn);
+               lsn = be64_to_cpu(((struct xfs_agf *)blk)->agf_lsn);
+               uuid = &((struct xfs_agf *)blk)->agf_uuid;
+               break;
        case XFS_AGFL_MAGIC:
-               return be64_to_cpu(((struct xfs_agfl *)blk)->agfl_lsn);
+               lsn = be64_to_cpu(((struct xfs_agfl *)blk)->agfl_lsn);
+               uuid = &((struct xfs_agfl *)blk)->agfl_uuid;
+               break;
        case XFS_AGI_MAGIC:
-               return be64_to_cpu(((struct xfs_agi *)blk)->agi_lsn);
+               lsn = be64_to_cpu(((struct xfs_agi *)blk)->agi_lsn);
+               uuid = &((struct xfs_agi *)blk)->agi_uuid;
+               break;
        case XFS_SYMLINK_MAGIC:
-               return be64_to_cpu(((struct xfs_dsymlink_hdr *)blk)->sl_lsn);
+               lsn = be64_to_cpu(((struct xfs_dsymlink_hdr *)blk)->sl_lsn);
+               uuid = &((struct xfs_dsymlink_hdr *)blk)->sl_uuid;
+               break;
        case XFS_DIR3_BLOCK_MAGIC:
        case XFS_DIR3_DATA_MAGIC:
        case XFS_DIR3_FREE_MAGIC:
-               return be64_to_cpu(((struct xfs_dir3_blk_hdr *)blk)->lsn);
+               lsn = be64_to_cpu(((struct xfs_dir3_blk_hdr *)blk)->lsn);
+               uuid = &((struct xfs_dir3_blk_hdr *)blk)->uuid;
+               break;
        case XFS_ATTR3_RMT_MAGIC:
-               return be64_to_cpu(((struct xfs_attr3_rmt_hdr *)blk)->rm_lsn);
+               lsn = be64_to_cpu(((struct xfs_attr3_rmt_hdr *)blk)->rm_lsn);
+               uuid = &((struct xfs_attr3_rmt_hdr *)blk)->rm_uuid;
+               break;
        case XFS_SB_MAGIC:
-               return be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn);
+               lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn);
+               uuid = &((struct xfs_dsb *)blk)->sb_uuid;
+               break;
        default:
                break;
        }
 
+       if (lsn != (xfs_lsn_t)-1) {
+               if (!uuid_equal(&mp->m_sb.sb_uuid, uuid))
+                       goto recover_immediately;
+               return lsn;
+       }
+
        magicda = be16_to_cpu(((struct xfs_da_blkinfo *)blk)->magic);
        switch (magicda) {
        case XFS_DIR3_LEAF1_MAGIC:
        case XFS_DIR3_LEAFN_MAGIC:
        case XFS_DA3_NODE_MAGIC:
-               return be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn);
+               lsn = be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn);
+               uuid = &((struct xfs_da3_blkinfo *)blk)->uuid;
+               break;
        default:
                break;
        }
 
+       if (lsn != (xfs_lsn_t)-1) {
+               if (!uuid_equal(&mp->m_sb.sb_uuid, uuid))
+                       goto recover_immediately;
+               return lsn;
+       }
+
        /*
         * We do individual object checks on dquot and inode buffers as they
         * have their own individual LSN records. Also, we could have a stale
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..b1a49677fe2540e93aae3b40b1c235949ac756cf 100644 (file)
@@ -0,0 +1 @@
+/* no content, but patch(1) dislikes empty files */
index 290734191f72d85a855022a2f0e24e7a6c895ffb..b46fb45f2cca4a5881b64d93577e6ae03a221670 100644 (file)
@@ -1322,10 +1322,9 @@ extern int drm_newctx(struct drm_device *dev, void *data,
 extern int drm_rmctx(struct drm_device *dev, void *data,
                     struct drm_file *file_priv);
 
-extern void drm_legacy_ctxbitmap_init(struct drm_device *dev);
-extern void drm_legacy_ctxbitmap_cleanup(struct drm_device *dev);
-extern void drm_legacy_ctxbitmap_release(struct drm_device *dev,
-                                        struct drm_file *file_priv);
+extern int drm_ctxbitmap_init(struct drm_device *dev);
+extern void drm_ctxbitmap_cleanup(struct drm_device *dev);
+extern void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle);
 
 extern int drm_setsareactx(struct drm_device *dev, void *data,
                           struct drm_file *file_priv);
index f7f1d7169b11c332612ba09a1663141870e08849..089743ade734fc564e18bfa7be35608da68d6fde 100644 (file)
@@ -158,6 +158,26 @@ static inline bool balloon_page_movable(struct page *page)
        return false;
 }
 
+/*
+ * isolated_balloon_page - identify an isolated balloon page on private
+ *                        compaction/migration page lists.
+ *
+ * After a compaction thread isolates a balloon page for migration, it raises
+ * the page refcount to prevent concurrent compaction threads from re-isolating
+ * the same page. For that reason putback_movable_pages(), or other routines
+ * that need to identify isolated balloon pages on private pagelists, cannot
+ * rely on balloon_page_movable() to accomplish the task.
+ */
+static inline bool isolated_balloon_page(struct page *page)
+{
+       /* Already isolated balloon pages, by default, have a raised refcount */
+       if (page_flags_cleared(page) && !page_mapped(page) &&
+           page_count(page) >= 2)
+               return __is_movable_balloon_page(page);
+
+       return false;
+}
+
 /*
  * balloon_page_insert - insert a page into the balloon's page list and make
  *                      the page->mapping assignment accordingly.
@@ -243,6 +263,11 @@ static inline bool balloon_page_movable(struct page *page)
        return false;
 }
 
+static inline bool isolated_balloon_page(struct page *page)
+{
+       return false;
+}
+
 static inline bool balloon_page_isolate(struct page *page)
 {
        return false;
index d66033f418c98bf1818ba2c5fcbdc7bcebe8021b..0333e605ea0d752c5aa306957a80aa72c5d4094f 100644 (file)
@@ -242,6 +242,7 @@ extern int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc,
                                 struct bcma_device *core, bool enable);
 extern void bcma_core_pci_up(struct bcma_bus *bus);
 extern void bcma_core_pci_down(struct bcma_bus *bus);
+extern void bcma_core_pci_power_save(struct bcma_bus *bus, bool up);
 
 extern int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev);
 extern int bcma_core_pci_plat_dev_init(struct pci_dev *dev);
index 2fdb4a451b49bd626d9415b231c76b7ac927cf69..0e6f765aa1f5adf3acbf54e27cea6fbe495de1cb 100644 (file)
@@ -862,6 +862,17 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
        return blk_queue_get_max_sectors(q, rq->cmd_flags);
 }
 
+static inline unsigned int blk_rq_count_bios(struct request *rq)
+{
+       unsigned int nr_bios = 0;
+       struct bio *bio;
+
+       __rq_for_each_bio(bio, rq)
+               nr_bios++;
+
+       return nr_bios;
+}
+
 /*
  * Request issue related functions.
  */
index 653073de09e379ef1c8b04c1a96d0ef2c948f5a3..ed419c62dde1876f4561179b6b4bc3052a50ed14 100644 (file)
@@ -406,13 +406,14 @@ int dm_noflush_suspending(struct dm_target *ti);
 union map_info *dm_get_mapinfo(struct bio *bio);
 union map_info *dm_get_rq_mapinfo(struct request *rq);
 
+struct queue_limits *dm_get_queue_limits(struct mapped_device *md);
+
 /*
  * Geometry functions.
  */
 int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
 int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);
 
-
 /*-----------------------------------------------------------------
  * Functions for manipulating device-mapper tables.
  *---------------------------------------------------------------*/
index d8b512496e50c155e99712f5226e115159beabc1..fc4a9aa7dd82c7a26e69ac21cce27bdc399dc9b1 100644 (file)
 #include <asm/unaligned.h>
 
 #ifdef __KERNEL__
-extern __be16          eth_type_trans(struct sk_buff *skb, struct net_device *dev);
+__be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
 extern const struct header_ops eth_header_ops;
 
-extern int eth_header(struct sk_buff *skb, struct net_device *dev,
-                     unsigned short type,
-                     const void *daddr, const void *saddr, unsigned len);
-extern int eth_rebuild_header(struct sk_buff *skb);
-extern int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr);
-extern int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
-extern void eth_header_cache_update(struct hh_cache *hh,
-                                   const struct net_device *dev,
-                                   const unsigned char *haddr);
-extern int eth_prepare_mac_addr_change(struct net_device *dev, void *p);
-extern void eth_commit_mac_addr_change(struct net_device *dev, void *p);
-extern int eth_mac_addr(struct net_device *dev, void *p);
-extern int eth_change_mtu(struct net_device *dev, int new_mtu);
-extern int eth_validate_addr(struct net_device *dev);
-
-
-
-extern struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
+int eth_header(struct sk_buff *skb, struct net_device *dev, unsigned short type,
+              const void *daddr, const void *saddr, unsigned len);
+int eth_rebuild_header(struct sk_buff *skb);
+int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr);
+int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh,
+                    __be16 type);
+void eth_header_cache_update(struct hh_cache *hh, const struct net_device *dev,
+                            const unsigned char *haddr);
+int eth_prepare_mac_addr_change(struct net_device *dev, void *p);
+void eth_commit_mac_addr_change(struct net_device *dev, void *p);
+int eth_mac_addr(struct net_device *dev, void *p);
+int eth_change_mtu(struct net_device *dev, int new_mtu);
+int eth_validate_addr(struct net_device *dev);
+
+struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
                                            unsigned int rxqs);
 #define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1)
 #define alloc_etherdev_mq(sizeof_priv, count) alloc_etherdev_mqs(sizeof_priv, count, count)
index e460ef8319841dd83d6f6f1eb8653dfd5452d6e0..5009fa16b5d8f08369ccdcb6537bdeb426598d0a 100644 (file)
@@ -27,7 +27,7 @@
 #include <linux/if_fc.h>
 
 #ifdef __KERNEL__
-extern struct net_device *alloc_fcdev(int sizeof_priv);
+struct net_device *alloc_fcdev(int sizeof_priv);
 #endif
 
 #endif /* _LINUX_FCDEVICE_H */
index 155bafd9e886607f5ee6b9bd824c2a1fa8ec6c32..9a79f0106da1a66ea0f7f647468fc3fd5c351108 100644 (file)
 #include <linux/if_fddi.h>
 
 #ifdef __KERNEL__
-extern __be16  fddi_type_trans(struct sk_buff *skb,
-                               struct net_device *dev);
-extern int fddi_change_mtu(struct net_device *dev, int new_mtu);
-extern struct net_device *alloc_fddidev(int sizeof_priv);
+__be16 fddi_type_trans(struct sk_buff *skb, struct net_device *dev);
+int fddi_change_mtu(struct net_device *dev, int new_mtu);
+struct net_device *alloc_fddidev(int sizeof_priv);
 #endif
 
 #endif /* _LINUX_FDDIDEVICE_H */
index f148e49084106fca84ae61b2880420dc0bc2798d..8ec23fb0b412290c767ab444b564029911ba7786 100644 (file)
@@ -31,11 +31,11 @@ struct hippi_cb {
        __u32   ifield;
 };
 
-extern __be16 hippi_type_trans(struct sk_buff *skb, struct net_device *dev);
-extern int hippi_change_mtu(struct net_device *dev, int new_mtu);
-extern int hippi_mac_addr(struct net_device *dev, void *p);
-extern int hippi_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p);
-extern struct net_device *alloc_hippi_dev(int sizeof_priv);
+__be16 hippi_type_trans(struct sk_buff *skb, struct net_device *dev);
+int hippi_change_mtu(struct net_device *dev, int new_mtu);
+int hippi_mac_addr(struct net_device *dev, void *p);
+int hippi_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p);
+struct net_device *alloc_hippi_dev(int sizeof_priv);
 #endif
 
 #endif /* _LINUX_HIPPIDEVICE_H */
index a3b8b2e2d24438129df020cc8d7af78562123cad..d98503bde7e9bc7ace0e2c3ddc5f2ae019f527ff 100644
 /*
  * Framework version for util services.
  */
+#define UTIL_FW_MINOR  0
+
+#define UTIL_WS2K8_FW_MAJOR  1
+#define UTIL_WS2K8_FW_VERSION     (UTIL_WS2K8_FW_MAJOR << 16 | UTIL_FW_MINOR)
 
 #define UTIL_FW_MAJOR  3
-#define UTIL_FW_MINOR  0
-#define UTIL_FW_MAJOR_MINOR     (UTIL_FW_MAJOR << 16 | UTIL_FW_MINOR)
+#define UTIL_FW_VERSION     (UTIL_FW_MAJOR << 16 | UTIL_FW_MINOR)
 
 
 /*
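The hunk above renames UTIL_FW_MAJOR_MINOR to UTIL_FW_VERSION and adds a separate UTIL_WS2K8_FW_VERSION (presumably for Windows Server 2008 hosts). Both words are packed the same way: major number in the high 16 bits, minor in the low 16. A minimal self-contained sketch, using only the macros shown above, of how the two version words decode:

#include <stdio.h>

/* Same packing as the header: major in bits 31..16, minor in bits 15..0. */
#define UTIL_FW_MINOR          0
#define UTIL_WS2K8_FW_MAJOR    1
#define UTIL_WS2K8_FW_VERSION  (UTIL_WS2K8_FW_MAJOR << 16 | UTIL_FW_MINOR)
#define UTIL_FW_MAJOR          3
#define UTIL_FW_VERSION        (UTIL_FW_MAJOR << 16 | UTIL_FW_MINOR)

int main(void)
{
	printf("UTIL_FW_VERSION       = 0x%x\n", UTIL_FW_VERSION);        /* 0x30000 -> 3.0 */
	printf("UTIL_WS2K8_FW_VERSION = 0x%x\n", UTIL_WS2K8_FW_VERSION);  /* 0x10000 -> 1.0 */
	return 0;
}
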
index 79640e015a86c6eb4a8bab2c0d2ee78eaf241558..0d678aefe69df6155b683ce039a42e4b7c4a1cab 100644
@@ -147,25 +147,27 @@ struct in_ifaddr {
        unsigned long           ifa_tstamp; /* updated timestamp */
 };
 
-extern int register_inetaddr_notifier(struct notifier_block *nb);
-extern int unregister_inetaddr_notifier(struct notifier_block *nb);
+int register_inetaddr_notifier(struct notifier_block *nb);
+int unregister_inetaddr_notifier(struct notifier_block *nb);
 
-extern void inet_netconf_notify_devconf(struct net *net, int type, int ifindex,
-                                       struct ipv4_devconf *devconf);
+void inet_netconf_notify_devconf(struct net *net, int type, int ifindex,
+                                struct ipv4_devconf *devconf);
 
-extern struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref);
+struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref);
 static inline struct net_device *ip_dev_find(struct net *net, __be32 addr)
 {
        return __ip_dev_find(net, addr, true);
 }
 
-extern int             inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b);
-extern int             devinet_ioctl(struct net *net, unsigned int cmd, void __user *);
-extern void            devinet_init(void);
-extern struct in_device        *inetdev_by_index(struct net *, int);
-extern __be32          inet_select_addr(const struct net_device *dev, __be32 dst, int scope);
-extern __be32          inet_confirm_addr(struct in_device *in_dev, __be32 dst, __be32 local, int scope);
-extern struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix, __be32 mask);
+int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b);
+int devinet_ioctl(struct net *net, unsigned int cmd, void __user *);
+void devinet_init(void);
+struct in_device *inetdev_by_index(struct net *, int);
+__be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope);
+__be32 inet_confirm_addr(struct in_device *in_dev, __be32 dst, __be32 local,
+                        int scope);
+struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix,
+                                   __be32 mask);
 
 static __inline__ int inet_ifa_match(__be32 addr, struct in_ifaddr *ifa)
 {
@@ -218,7 +220,7 @@ static inline struct in_device *__in_dev_get_rtnl(const struct net_device *dev)
        return rtnl_dereference(dev->ip_ptr);
 }
 
-extern void in_dev_finish_destroy(struct in_device *idev);
+void in_dev_finish_destroy(struct in_device *idev);
 
 static inline void in_dev_put(struct in_device *idev)
 {
index 28ea38439313226861c5c6df902b7b92c3cb5969..b7f1f3bb346d38aa16ce0f8ffb7cdd94201567f8 100644
@@ -370,7 +370,7 @@ static inline struct raw6_sock *raw6_sk(const struct sock *sk)
 #endif /* IS_ENABLED(CONFIG_IPV6) */
 
 #define INET6_MATCH(__sk, __net, __saddr, __daddr, __ports, __dif)     \
-       ((inet_sk(__sk)->inet_portpair == (__ports))            &&      \
+       (((__sk)->sk_portpair == (__ports))                     &&      \
         ((__sk)->sk_family == AF_INET6)                        &&      \
         ipv6_addr_equal(&inet6_sk(__sk)->daddr, (__saddr))     &&      \
         ipv6_addr_equal(&inet6_sk(__sk)->rcv_saddr, (__daddr)) &&      \
@@ -379,7 +379,7 @@ static inline struct raw6_sock *raw6_sk(const struct sock *sk)
         net_eq(sock_net(__sk), (__net)))
 
 #define INET6_TW_MATCH(__sk, __net, __saddr, __daddr, __ports, __dif)     \
-       ((inet_twsk(__sk)->tw_portpair == (__ports))                    && \
+       (((__sk)->sk_portpair == (__ports))                             && \
         ((__sk)->sk_family == AF_INET6)                                && \
         ipv6_addr_equal(&inet6_twsk(__sk)->tw_v6_daddr, (__saddr))     && \
         ipv6_addr_equal(&inet6_twsk(__sk)->tw_v6_rcv_saddr, (__daddr)) && \
index 482ad2d84a32dae333c74cccb2d1394b247d5ae8..672ddc4de4af0511b20a4d269bcae1c517c3d5c4 100644
@@ -439,6 +439,17 @@ static inline char *hex_byte_pack(char *buf, u8 byte)
        return buf;
 }
 
+extern const char hex_asc_upper[];
+#define hex_asc_upper_lo(x)    hex_asc_upper[((x) & 0x0f)]
+#define hex_asc_upper_hi(x)    hex_asc_upper[((x) & 0xf0) >> 4]
+
+static inline char *hex_byte_pack_upper(char *buf, u8 byte)
+{
+       *buf++ = hex_asc_upper_hi(byte);
+       *buf++ = hex_asc_upper_lo(byte);
+       return buf;
+}
+
 static inline char * __deprecated pack_hex_byte(char *buf, u8 byte)
 {
        return hex_byte_pack(buf, byte);
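hex_asc_upper_hi()/hex_asc_upper_lo() and hex_byte_pack_upper() are uppercase counterparts of the existing hex_byte_pack() helper shown in the context above. A self-contained usage sketch, formatting a byte string as uppercase hex; the lookup table itself is exported from lib/hexdump.c and is assumed here to be the obvious "0123456789ABCDEF":

#include <stdio.h>

typedef unsigned char u8;

/* Local stand-ins mirroring the helpers added in the hunk above. */
static const char hex_asc_upper[] = "0123456789ABCDEF";
#define hex_asc_upper_lo(x)	hex_asc_upper[((x) & 0x0f)]
#define hex_asc_upper_hi(x)	hex_asc_upper[((x) & 0xf0) >> 4]

static char *hex_byte_pack_upper(char *buf, u8 byte)
{
	*buf++ = hex_asc_upper_hi(byte);
	*buf++ = hex_asc_upper_lo(byte);
	return buf;
}

int main(void)
{
	const u8 mac[6] = { 0x00, 0x1a, 0x2b, 0x3c, 0x4d, 0x5e };
	char out[2 * sizeof(mac) + 1];
	char *p = out;
	unsigned int i;

	for (i = 0; i < sizeof(mac); i++)
		p = hex_byte_pack_upper(p, mac[i]);
	*p = '\0';
	printf("%s\n", out);	/* prints 001A2B3C4D5E */
	return 0;
}
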
index 60e95872da2983365300f1abb193b6e81974b5de..ecc82b37c4ccf00fb863ecf6981de19bbccec52a 100644
@@ -53,23 +53,6 @@ struct mem_cgroup_reclaim_cookie {
        unsigned int generation;
 };
 
-enum mem_cgroup_filter_t {
-       VISIT,          /* visit current node */
-       SKIP,           /* skip the current node and continue traversal */
-       SKIP_TREE,      /* skip the whole subtree and continue traversal */
-};
-
-/*
- * mem_cgroup_filter_t predicate might instruct mem_cgroup_iter_cond how to
- * iterate through the hierarchy tree. Each tree element is checked by the
- * predicate before it is returned by the iterator. If a filter returns
- * SKIP or SKIP_TREE then the iterator code continues traversal (with the
- * next node down the hierarchy or the next node that doesn't belong under the
- * memcg's subtree).
- */
-typedef enum mem_cgroup_filter_t
-(*mem_cgroup_iter_filter)(struct mem_cgroup *memcg, struct mem_cgroup *root);
-
 #ifdef CONFIG_MEMCG
 /*
  * All "charge" functions with gfp_mask should use GFP_KERNEL or
@@ -137,18 +120,9 @@ mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
 extern void mem_cgroup_end_migration(struct mem_cgroup *memcg,
        struct page *oldpage, struct page *newpage, bool migration_ok);
 
-struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root,
-                                  struct mem_cgroup *prev,
-                                  struct mem_cgroup_reclaim_cookie *reclaim,
-                                  mem_cgroup_iter_filter cond);
-
-static inline struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
-                                  struct mem_cgroup *prev,
-                                  struct mem_cgroup_reclaim_cookie *reclaim)
-{
-       return mem_cgroup_iter_cond(root, prev, reclaim, NULL);
-}
-
+struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
+                                  struct mem_cgroup *,
+                                  struct mem_cgroup_reclaim_cookie *);
 void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
 
 /*
@@ -260,9 +234,9 @@ static inline void mem_cgroup_dec_page_stat(struct page *page,
        mem_cgroup_update_page_stat(page, idx, -1);
 }
 
-enum mem_cgroup_filter_t
-mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg,
-               struct mem_cgroup *root);
+unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
+                                               gfp_t gfp_mask,
+                                               unsigned long *total_scanned);
 
 void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
 static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
@@ -376,15 +350,6 @@ static inline void mem_cgroup_end_migration(struct mem_cgroup *memcg,
                struct page *oldpage, struct page *newpage, bool migration_ok)
 {
 }
-static inline struct mem_cgroup *
-mem_cgroup_iter_cond(struct mem_cgroup *root,
-               struct mem_cgroup *prev,
-               struct mem_cgroup_reclaim_cookie *reclaim,
-               mem_cgroup_iter_filter cond)
-{
-       /* first call must return non-NULL, second return NULL */
-       return (struct mem_cgroup *)(unsigned long)!prev;
-}
 
 static inline struct mem_cgroup *
 mem_cgroup_iter(struct mem_cgroup *root,
@@ -471,11 +436,11 @@ static inline void mem_cgroup_dec_page_stat(struct page *page,
 }
 
 static inline
-enum mem_cgroup_filter_t
-mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg,
-               struct mem_cgroup *root)
+unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
+                                           gfp_t gfp_mask,
+                                           unsigned long *total_scanned)
 {
-       return VISIT;
+       return 0;
 }
 
 static inline void mem_cgroup_split_huge_fixup(struct page *head)
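These hunks drop the short-lived filter/predicate iterator (mem_cgroup_filter_t, mem_cgroup_iter_cond, mem_cgroup_soft_reclaim_eligible) and go back to the plain mem_cgroup_iter() / mem_cgroup_soft_limit_reclaim() interface. For context, a kernel-side sketch of the usual pre-order walk with the restored iterator (the counting function itself is hypothetical):

/* Count the memcgs in root's sub-hierarchy.  Passing NULL as 'prev' starts
 * the walk; a NULL reclaim cookie means plain traversal.  If the loop were
 * broken early, mem_cgroup_iter_break(root, iter) would be needed to drop
 * the reference the iterator still holds. */
static unsigned int count_memcgs(struct mem_cgroup *root)
{
	struct mem_cgroup *iter = NULL;
	unsigned int n = 0;

	while ((iter = mem_cgroup_iter(root, iter, NULL)) != NULL)
		n++;

	return n;
}
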
index ccd4260834c57da2ea30fc5b20714ef15afe076f..bab49da8a0f0b1bd2e01d516fdd0e39a07162326 100644
@@ -15,8 +15,8 @@
 #include <linux/spinlock_types.h>
 #include <linux/linkage.h>
 #include <linux/lockdep.h>
-
 #include <linux/atomic.h>
+#include <asm/processor.h>
 
 /*
  * Simple, straightforward mutexes with strict semantics:
@@ -175,8 +175,8 @@ extern void mutex_unlock(struct mutex *lock);
 
 extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
 
-#ifndef CONFIG_HAVE_ARCH_MUTEX_CPU_RELAX
-#define arch_mutex_cpu_relax() cpu_relax()
+#ifndef arch_mutex_cpu_relax
+# define arch_mutex_cpu_relax() cpu_relax()
 #endif
 
 #endif
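The CONFIG_HAVE_ARCH_MUTEX_CPU_RELAX switch becomes a plain macro override, made visible through the new <asm/processor.h> include. A sketch of what an architecture-side override is expected to look like, modelled loosely on s390 (whose generic cpu_relax() is an expensive hypervisor yield):

/* In arch/<arch>/include/asm/processor.h, now pulled in by <linux/mutex.h>:
 * provide a cheap relax for the mutex spin loops. */
#define arch_mutex_cpu_relax()	barrier()

/* With that define present, the #ifndef fallback shown above is skipped;
 * every other architecture keeps the cpu_relax() default. */
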
index 4f27575ce1d67ebe74d1bba4f40a70f6edbdea11..ca9ec85409058e6f92248b6d98285d16a4347434 100644
@@ -195,27 +195,23 @@ enum {
        SOCK_WAKE_URG,
 };
 
-extern int          sock_wake_async(struct socket *sk, int how, int band);
-extern int          sock_register(const struct net_proto_family *fam);
-extern void         sock_unregister(int family);
-extern int          __sock_create(struct net *net, int family, int type, int proto,
-                                struct socket **res, int kern);
-extern int          sock_create(int family, int type, int proto,
-                                struct socket **res);
-extern int          sock_create_kern(int family, int type, int proto,
-                                     struct socket **res);
-extern int          sock_create_lite(int family, int type, int proto,
-                                     struct socket **res); 
-extern void         sock_release(struct socket *sock);
-extern int          sock_sendmsg(struct socket *sock, struct msghdr *msg,
-                                 size_t len);
-extern int          sock_recvmsg(struct socket *sock, struct msghdr *msg,
-                                 size_t size, int flags);
-extern struct file  *sock_alloc_file(struct socket *sock, int flags, const char *dname);
-extern struct socket *sockfd_lookup(int fd, int *err);
-extern struct socket *sock_from_file(struct file *file, int *err);
+int sock_wake_async(struct socket *sk, int how, int band);
+int sock_register(const struct net_proto_family *fam);
+void sock_unregister(int family);
+int __sock_create(struct net *net, int family, int type, int proto,
+                 struct socket **res, int kern);
+int sock_create(int family, int type, int proto, struct socket **res);
+int sock_create_kern(int family, int type, int proto, struct socket **res);
+int sock_create_lite(int family, int type, int proto, struct socket **res);
+void sock_release(struct socket *sock);
+int sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t len);
+int sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+                int flags);
+struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname);
+struct socket *sockfd_lookup(int fd, int *err);
+struct socket *sock_from_file(struct file *file, int *err);
 #define                     sockfd_put(sock) fput(sock->file)
-extern int          net_ratelimit(void);
+int net_ratelimit(void);
 
 #define net_ratelimited_function(function, ...)                        \
 do {                                                           \
@@ -243,32 +239,28 @@ do {                                                              \
 #define net_random()           prandom_u32()
 #define net_srandom(seed)      prandom_seed((__force u32)(seed))
 
-extern int          kernel_sendmsg(struct socket *sock, struct msghdr *msg,
-                                   struct kvec *vec, size_t num, size_t len);
-extern int          kernel_recvmsg(struct socket *sock, struct msghdr *msg,
-                                   struct kvec *vec, size_t num,
-                                   size_t len, int flags);
+int kernel_sendmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec,
+                  size_t num, size_t len);
+int kernel_recvmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec,
+                  size_t num, size_t len, int flags);
 
-extern int kernel_bind(struct socket *sock, struct sockaddr *addr,
-                      int addrlen);
-extern int kernel_listen(struct socket *sock, int backlog);
-extern int kernel_accept(struct socket *sock, struct socket **newsock,
-                        int flags);
-extern int kernel_connect(struct socket *sock, struct sockaddr *addr,
-                         int addrlen, int flags);
-extern int kernel_getsockname(struct socket *sock, struct sockaddr *addr,
-                             int *addrlen);
-extern int kernel_getpeername(struct socket *sock, struct sockaddr *addr,
-                             int *addrlen);
-extern int kernel_getsockopt(struct socket *sock, int level, int optname,
-                            char *optval, int *optlen);
-extern int kernel_setsockopt(struct socket *sock, int level, int optname,
-                            char *optval, unsigned int optlen);
-extern int kernel_sendpage(struct socket *sock, struct page *page, int offset,
-                          size_t size, int flags);
-extern int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg);
-extern int kernel_sock_shutdown(struct socket *sock,
-                               enum sock_shutdown_cmd how);
+int kernel_bind(struct socket *sock, struct sockaddr *addr, int addrlen);
+int kernel_listen(struct socket *sock, int backlog);
+int kernel_accept(struct socket *sock, struct socket **newsock, int flags);
+int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen,
+                  int flags);
+int kernel_getsockname(struct socket *sock, struct sockaddr *addr,
+                      int *addrlen);
+int kernel_getpeername(struct socket *sock, struct sockaddr *addr,
+                      int *addrlen);
+int kernel_getsockopt(struct socket *sock, int level, int optname, char *optval,
+                     int *optlen);
+int kernel_setsockopt(struct socket *sock, int level, int optname, char *optval,
+                     unsigned int optlen);
+int kernel_sendpage(struct socket *sock, struct page *page, int offset,
+                   size_t size, int flags);
+int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg);
+int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how);
 
 #define MODULE_ALIAS_NETPROTO(proto) \
        MODULE_ALIAS("net-pf-" __stringify(proto))
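These hunks only drop the extern keywords and rewrap the prototypes; the kernel-socket helper signatures are unchanged. For orientation, a minimal kernel-context sketch of the usual send pattern built from the declarations above (hypothetical port and payload, error handling trimmed):

/* Kernel context; <linux/net.h>, <linux/in.h> and friends assumed. */
static int send_probe(void *buf, size_t len)
{
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port   = htons(12345),	/* hypothetical port */
		.sin_addr   = { .s_addr = htonl(INADDR_LOOPBACK) },
	};
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
	struct kvec vec = { .iov_base = buf, .iov_len = len };
	struct socket *sock;
	int err;

	err = sock_create_kern(AF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
	if (err < 0)
		return err;

	err = kernel_connect(sock, (struct sockaddr *)&addr, sizeof(addr), 0);
	if (!err)
		err = kernel_sendmsg(sock, &msg, &vec, 1, len);

	sock_release(sock);
	return err;
}
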
index 3de49aca451970a738b5ae55cfd74656248ac9ea..f5cd464271bff1b7c4a92862d3ada2f685ff560c 100644
@@ -60,8 +60,8 @@ struct wireless_dev;
 #define SET_ETHTOOL_OPS(netdev,ops) \
        ( (netdev)->ethtool_ops = (ops) )
 
-extern void netdev_set_default_ethtool_ops(struct net_device *dev,
-                                          const struct ethtool_ops *ops);
+void netdev_set_default_ethtool_ops(struct net_device *dev,
+                                   const struct ethtool_ops *ops);
 
 /* hardware address assignment types */
 #define NET_ADDR_PERM          0       /* address is permanent (default) */
@@ -298,7 +298,7 @@ struct netdev_boot_setup {
 };
 #define NETDEV_BOOT_SETUP_MAX 8
 
-extern int __init netdev_boot_setup(char *str);
+int __init netdev_boot_setup(char *str);
 
 /*
  * Structure for NAPI scheduling similar to tasklet but with weighting
@@ -394,7 +394,7 @@ enum rx_handler_result {
 typedef enum rx_handler_result rx_handler_result_t;
 typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
 
-extern void __napi_schedule(struct napi_struct *n);
+void __napi_schedule(struct napi_struct *n);
 
 static inline bool napi_disable_pending(struct napi_struct *n)
 {
@@ -445,8 +445,8 @@ static inline bool napi_reschedule(struct napi_struct *napi)
  *
  * Mark NAPI processing as complete.
  */
-extern void __napi_complete(struct napi_struct *n);
-extern void napi_complete(struct napi_struct *n);
+void __napi_complete(struct napi_struct *n);
+void napi_complete(struct napi_struct *n);
 
 /**
  *     napi_by_id - lookup a NAPI by napi_id
@@ -455,7 +455,7 @@ extern void napi_complete(struct napi_struct *n);
  * lookup @napi_id in napi_hash table
  * must be called under rcu_read_lock()
  */
-extern struct napi_struct *napi_by_id(unsigned int napi_id);
+struct napi_struct *napi_by_id(unsigned int napi_id);
 
 /**
  *     napi_hash_add - add a NAPI to global hashtable
@@ -463,7 +463,7 @@ extern struct napi_struct *napi_by_id(unsigned int napi_id);
  *
  * generate a new napi_id and store a @napi under it in napi_hash
  */
-extern void napi_hash_add(struct napi_struct *napi);
+void napi_hash_add(struct napi_struct *napi);
 
 /**
  *     napi_hash_del - remove a NAPI from global table
@@ -472,7 +472,7 @@ extern void napi_hash_add(struct napi_struct *napi);
  * Warning: caller must observe rcu grace period
  * before freeing memory containing @napi
  */
-extern void napi_hash_del(struct napi_struct *napi);
+void napi_hash_del(struct napi_struct *napi);
 
 /**
  *     napi_disable - prevent NAPI from scheduling
@@ -664,8 +664,8 @@ static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
 extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
 
 #ifdef CONFIG_RFS_ACCEL
-extern bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
-                               u32 flow_id, u16 filter_id);
+bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
+                        u16 filter_id);
 #endif
 
 /* This structure contains an instance of an RX queue. */
@@ -1143,8 +1143,18 @@ struct net_device {
        struct list_head        dev_list;
        struct list_head        napi_list;
        struct list_head        unreg_list;
-       struct list_head        upper_dev_list; /* List of upper devices */
-       struct list_head        lower_dev_list;
+
+       /* directly linked devices, like slaves for bonding */
+       struct {
+               struct list_head upper;
+               struct list_head lower;
+       } adj_list;
+
+       /* all linked devices, *including* neighbours */
+       struct {
+               struct list_head upper;
+               struct list_head lower;
+       } all_adj_list;
 
 
        /* currently active device features */
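The single upper_dev_list/lower_dev_list pair is split in two: adj_list holds directly linked devices (e.g. a bond and its slaves) and all_adj_list the whole transitive neighbourhood; the matching iterators (netdev_for_each_all_upper_dev_rcu and the lower_private walkers) are added further down in this file. A kernel-context sketch of walking every upper device with the renamed iterator (caller holds rcu_read_lock(); the function itself is hypothetical):

/* Count every device stacked, directly or indirectly, above 'dev'. */
static unsigned int count_all_uppers(struct net_device *dev)
{
	struct net_device *upper;
	struct list_head *iter;
	unsigned int n = 0;

	netdev_for_each_all_upper_dev_rcu(dev, upper, iter)
		n++;

	return n;
}
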
@@ -1487,9 +1497,9 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
                f(dev, &dev->_tx[i], arg);
 }
 
-extern struct netdev_queue *netdev_pick_tx(struct net_device *dev,
-                                          struct sk_buff *skb);
-extern u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb);
+struct netdev_queue *netdev_pick_tx(struct net_device *dev,
+                                   struct sk_buff *skb);
+u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb);
 
 /*
  * Net namespace inlines
@@ -1673,8 +1683,8 @@ struct packet_offload {
 #define NETDEV_CHANGEUPPER     0x0015
 #define NETDEV_RESEND_IGMP     0x0016
 
-extern int register_netdevice_notifier(struct notifier_block *nb);
-extern int unregister_netdevice_notifier(struct notifier_block *nb);
+int register_netdevice_notifier(struct notifier_block *nb);
+int unregister_netdevice_notifier(struct notifier_block *nb);
 
 struct netdev_notifier_info {
        struct net_device *dev;
@@ -1697,9 +1707,9 @@ netdev_notifier_info_to_dev(const struct netdev_notifier_info *info)
        return info->dev;
 }
 
-extern int call_netdevice_notifiers_info(unsigned long val, struct net_device *dev,
-                                        struct netdev_notifier_info *info);
-extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
+int call_netdevice_notifiers_info(unsigned long val, struct net_device *dev,
+                                 struct netdev_notifier_info *info);
+int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
 
 
 extern rwlock_t                                dev_base_lock;          /* Device list lock */
@@ -1754,54 +1764,52 @@ static inline struct net_device *first_net_device_rcu(struct net *net)
        return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
 }
 
-extern int                     netdev_boot_setup_check(struct net_device *dev);
-extern unsigned long           netdev_boot_base(const char *prefix, int unit);
-extern struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
-                                             const char *hwaddr);
-extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
-extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
-extern void            dev_add_pack(struct packet_type *pt);
-extern void            dev_remove_pack(struct packet_type *pt);
-extern void            __dev_remove_pack(struct packet_type *pt);
-extern void            dev_add_offload(struct packet_offload *po);
-extern void            dev_remove_offload(struct packet_offload *po);
-extern void            __dev_remove_offload(struct packet_offload *po);
-
-extern struct net_device       *dev_get_by_flags_rcu(struct net *net, unsigned short flags,
-                                                     unsigned short mask);
-extern struct net_device       *dev_get_by_name(struct net *net, const char *name);
-extern struct net_device       *dev_get_by_name_rcu(struct net *net, const char *name);
-extern struct net_device       *__dev_get_by_name(struct net *net, const char *name);
-extern int             dev_alloc_name(struct net_device *dev, const char *name);
-extern int             dev_open(struct net_device *dev);
-extern int             dev_close(struct net_device *dev);
-extern void            dev_disable_lro(struct net_device *dev);
-extern int             dev_loopback_xmit(struct sk_buff *newskb);
-extern int             dev_queue_xmit(struct sk_buff *skb);
-extern int             register_netdevice(struct net_device *dev);
-extern void            unregister_netdevice_queue(struct net_device *dev,
-                                                  struct list_head *head);
-extern void            unregister_netdevice_many(struct list_head *head);
+int netdev_boot_setup_check(struct net_device *dev);
+unsigned long netdev_boot_base(const char *prefix, int unit);
+struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
+                                      const char *hwaddr);
+struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
+struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
+void dev_add_pack(struct packet_type *pt);
+void dev_remove_pack(struct packet_type *pt);
+void __dev_remove_pack(struct packet_type *pt);
+void dev_add_offload(struct packet_offload *po);
+void dev_remove_offload(struct packet_offload *po);
+void __dev_remove_offload(struct packet_offload *po);
+
+struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short flags,
+                                       unsigned short mask);
+struct net_device *dev_get_by_name(struct net *net, const char *name);
+struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
+struct net_device *__dev_get_by_name(struct net *net, const char *name);
+int dev_alloc_name(struct net_device *dev, const char *name);
+int dev_open(struct net_device *dev);
+int dev_close(struct net_device *dev);
+void dev_disable_lro(struct net_device *dev);
+int dev_loopback_xmit(struct sk_buff *newskb);
+int dev_queue_xmit(struct sk_buff *skb);
+int register_netdevice(struct net_device *dev);
+void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
+void unregister_netdevice_many(struct list_head *head);
 static inline void unregister_netdevice(struct net_device *dev)
 {
        unregister_netdevice_queue(dev, NULL);
 }
 
-extern int             netdev_refcnt_read(const struct net_device *dev);
-extern void            free_netdev(struct net_device *dev);
-extern void            synchronize_net(void);
-extern int             init_dummy_netdev(struct net_device *dev);
+int netdev_refcnt_read(const struct net_device *dev);
+void free_netdev(struct net_device *dev);
+void synchronize_net(void);
+int init_dummy_netdev(struct net_device *dev);
 
-extern struct net_device       *dev_get_by_index(struct net *net, int ifindex);
-extern struct net_device       *__dev_get_by_index(struct net *net, int ifindex);
-extern struct net_device       *dev_get_by_index_rcu(struct net *net, int ifindex);
-extern int             netdev_get_name(struct net *net, char *name, int ifindex);
-extern int             dev_restart(struct net_device *dev);
+struct net_device *dev_get_by_index(struct net *net, int ifindex);
+struct net_device *__dev_get_by_index(struct net *net, int ifindex);
+struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
+int netdev_get_name(struct net *net, char *name, int ifindex);
+int dev_restart(struct net_device *dev);
 #ifdef CONFIG_NETPOLL_TRAP
-extern int             netpoll_trap(void);
+int netpoll_trap(void);
 #endif
-extern int            skb_gro_receive(struct sk_buff **head,
-                                      struct sk_buff *skb);
+int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb);
 
 static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
 {
@@ -1873,7 +1881,7 @@ static inline int dev_parse_header(const struct sk_buff *skb,
 }
 
 typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
-extern int             register_gifconf(unsigned int family, gifconf_func_t * gifconf);
+int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
 static inline int unregister_gifconf(unsigned int family)
 {
        return register_gifconf(family, NULL);
@@ -1944,7 +1952,7 @@ static inline void input_queue_tail_incr_save(struct softnet_data *sd,
 
 DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
 
-extern void __netif_schedule(struct Qdisc *q);
+void __netif_schedule(struct Qdisc *q);
 
 static inline void netif_schedule_queue(struct netdev_queue *txq)
 {
@@ -2264,8 +2272,8 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
 }
 
 #ifdef CONFIG_XPS
-extern int netif_set_xps_queue(struct net_device *dev, struct cpumask *mask,
-                              u16 index);
+int netif_set_xps_queue(struct net_device *dev, struct cpumask *mask,
+                       u16 index);
 #else
 static inline int netif_set_xps_queue(struct net_device *dev,
                                      struct cpumask *mask,
@@ -2296,12 +2304,10 @@ static inline bool netif_is_multiqueue(const struct net_device *dev)
        return dev->num_tx_queues > 1;
 }
 
-extern int netif_set_real_num_tx_queues(struct net_device *dev,
-                                       unsigned int txq);
+int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);
 
 #ifdef CONFIG_RPS
-extern int netif_set_real_num_rx_queues(struct net_device *dev,
-                                       unsigned int rxq);
+int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
 #else
 static inline int netif_set_real_num_rx_queues(struct net_device *dev,
                                                unsigned int rxq)
@@ -2328,28 +2334,27 @@ static inline int netif_copy_real_num_queues(struct net_device *to_dev,
 }
 
 #define DEFAULT_MAX_NUM_RSS_QUEUES     (8)
-extern int netif_get_num_default_rss_queues(void);
+int netif_get_num_default_rss_queues(void);
 
 /* Use this variant when it is known for sure that it
  * is executing from hardware interrupt context or with hardware interrupts
  * disabled.
  */
-extern void dev_kfree_skb_irq(struct sk_buff *skb);
+void dev_kfree_skb_irq(struct sk_buff *skb);
 
 /* Use this variant in places where it could be invoked
  * from either hardware interrupt or other context, with hardware interrupts
  * either disabled or enabled.
  */
-extern void dev_kfree_skb_any(struct sk_buff *skb);
+void dev_kfree_skb_any(struct sk_buff *skb);
 
-extern int             netif_rx(struct sk_buff *skb);
-extern int             netif_rx_ni(struct sk_buff *skb);
-extern int             netif_receive_skb(struct sk_buff *skb);
-extern gro_result_t    napi_gro_receive(struct napi_struct *napi,
-                                        struct sk_buff *skb);
-extern void            napi_gro_flush(struct napi_struct *napi, bool flush_old);
-extern struct sk_buff *        napi_get_frags(struct napi_struct *napi);
-extern gro_result_t    napi_gro_frags(struct napi_struct *napi);
+int netif_rx(struct sk_buff *skb);
+int netif_rx_ni(struct sk_buff *skb);
+int netif_receive_skb(struct sk_buff *skb);
+gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
+void napi_gro_flush(struct napi_struct *napi, bool flush_old);
+struct sk_buff *napi_get_frags(struct napi_struct *napi);
+gro_result_t napi_gro_frags(struct napi_struct *napi);
 
 static inline void napi_free_frags(struct napi_struct *napi)
 {
@@ -2357,40 +2362,36 @@ static inline void napi_free_frags(struct napi_struct *napi)
        napi->skb = NULL;
 }
 
-extern int netdev_rx_handler_register(struct net_device *dev,
-                                     rx_handler_func_t *rx_handler,
-                                     void *rx_handler_data);
-extern void netdev_rx_handler_unregister(struct net_device *dev);
-
-extern bool            dev_valid_name(const char *name);
-extern int             dev_ioctl(struct net *net, unsigned int cmd, void __user *);
-extern int             dev_ethtool(struct net *net, struct ifreq *);
-extern unsigned int    dev_get_flags(const struct net_device *);
-extern int             __dev_change_flags(struct net_device *, unsigned int flags);
-extern int             dev_change_flags(struct net_device *, unsigned int);
-extern void            __dev_notify_flags(struct net_device *, unsigned int old_flags);
-extern int             dev_change_name(struct net_device *, const char *);
-extern int             dev_set_alias(struct net_device *, const char *, size_t);
-extern int             dev_change_net_namespace(struct net_device *,
-                                                struct net *, const char *);
-extern int             dev_set_mtu(struct net_device *, int);
-extern void            dev_set_group(struct net_device *, int);
-extern int             dev_set_mac_address(struct net_device *,
-                                           struct sockaddr *);
-extern int             dev_change_carrier(struct net_device *,
-                                          bool new_carrier);
-extern int             dev_get_phys_port_id(struct net_device *dev,
-                                            struct netdev_phys_port_id *ppid);
-extern int             dev_hard_start_xmit(struct sk_buff *skb,
-                                           struct net_device *dev,
-                                           struct netdev_queue *txq);
-extern int             dev_forward_skb(struct net_device *dev,
-                                       struct sk_buff *skb);
+int netdev_rx_handler_register(struct net_device *dev,
+                              rx_handler_func_t *rx_handler,
+                              void *rx_handler_data);
+void netdev_rx_handler_unregister(struct net_device *dev);
+
+bool dev_valid_name(const char *name);
+int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
+int dev_ethtool(struct net *net, struct ifreq *);
+unsigned int dev_get_flags(const struct net_device *);
+int __dev_change_flags(struct net_device *, unsigned int flags);
+int dev_change_flags(struct net_device *, unsigned int);
+void __dev_notify_flags(struct net_device *, unsigned int old_flags,
+                       unsigned int gchanges);
+int dev_change_name(struct net_device *, const char *);
+int dev_set_alias(struct net_device *, const char *, size_t);
+int dev_change_net_namespace(struct net_device *, struct net *, const char *);
+int dev_set_mtu(struct net_device *, int);
+void dev_set_group(struct net_device *, int);
+int dev_set_mac_address(struct net_device *, struct sockaddr *);
+int dev_change_carrier(struct net_device *, bool new_carrier);
+int dev_get_phys_port_id(struct net_device *dev,
+                        struct netdev_phys_port_id *ppid);
+int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
+                       struct netdev_queue *txq);
+int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
 
 extern int             netdev_budget;
 
 /* Called by rtnetlink.c:rtnl_unlock() */
-extern void netdev_run_todo(void);
+void netdev_run_todo(void);
 
 /**
  *     dev_put - release reference to device
@@ -2423,9 +2424,9 @@ static inline void dev_hold(struct net_device *dev)
  * kind of lower layer not just hardware media.
  */
 
-extern void linkwatch_init_dev(struct net_device *dev);
-extern void linkwatch_fire_event(struct net_device *dev);
-extern void linkwatch_forget_dev(struct net_device *dev);
+void linkwatch_init_dev(struct net_device *dev);
+void linkwatch_fire_event(struct net_device *dev);
+void linkwatch_forget_dev(struct net_device *dev);
 
 /**
  *     netif_carrier_ok - test if carrier present
@@ -2438,13 +2439,13 @@ static inline bool netif_carrier_ok(const struct net_device *dev)
        return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
 }
 
-extern unsigned long dev_trans_start(struct net_device *dev);
+unsigned long dev_trans_start(struct net_device *dev);
 
-extern void __netdev_watchdog_up(struct net_device *dev);
+void __netdev_watchdog_up(struct net_device *dev);
 
-extern void netif_carrier_on(struct net_device *dev);
+void netif_carrier_on(struct net_device *dev);
 
-extern void netif_carrier_off(struct net_device *dev);
+void netif_carrier_off(struct net_device *dev);
 
 /**
  *     netif_dormant_on - mark device as dormant.
@@ -2512,9 +2513,9 @@ static inline bool netif_device_present(struct net_device *dev)
        return test_bit(__LINK_STATE_PRESENT, &dev->state);
 }
 
-extern void netif_device_detach(struct net_device *dev);
+void netif_device_detach(struct net_device *dev);
 
-extern void netif_device_attach(struct net_device *dev);
+void netif_device_attach(struct net_device *dev);
 
 /*
  * Network interface message level settings
@@ -2723,119 +2724,138 @@ static inline void netif_addr_unlock_bh(struct net_device *dev)
 
 /* These functions live elsewhere (drivers/net/net_init.c, but related) */
 
-extern void            ether_setup(struct net_device *dev);
+void ether_setup(struct net_device *dev);
 
 /* Support for loadable net-drivers */
-extern struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
-                                      void (*setup)(struct net_device *),
-                                      unsigned int txqs, unsigned int rxqs);
+struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
+                                   void (*setup)(struct net_device *),
+                                   unsigned int txqs, unsigned int rxqs);
 #define alloc_netdev(sizeof_priv, name, setup) \
        alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1)
 
 #define alloc_netdev_mq(sizeof_priv, name, setup, count) \
        alloc_netdev_mqs(sizeof_priv, name, setup, count, count)
 
-extern int             register_netdev(struct net_device *dev);
-extern void            unregister_netdev(struct net_device *dev);
+int register_netdev(struct net_device *dev);
+void unregister_netdev(struct net_device *dev);
 
 /* General hardware address lists handling functions */
-extern int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
-                                 struct netdev_hw_addr_list *from_list,
-                                 int addr_len, unsigned char addr_type);
-extern void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
-                                  struct netdev_hw_addr_list *from_list,
-                                  int addr_len, unsigned char addr_type);
-extern int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
-                         struct netdev_hw_addr_list *from_list,
-                         int addr_len);
-extern void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
-                            struct netdev_hw_addr_list *from_list,
-                            int addr_len);
-extern void __hw_addr_flush(struct netdev_hw_addr_list *list);
-extern void __hw_addr_init(struct netdev_hw_addr_list *list);
+int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
+                          struct netdev_hw_addr_list *from_list,
+                          int addr_len, unsigned char addr_type);
+void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
+                           struct netdev_hw_addr_list *from_list,
+                           int addr_len, unsigned char addr_type);
+int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
+                  struct netdev_hw_addr_list *from_list, int addr_len);
+void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
+                     struct netdev_hw_addr_list *from_list, int addr_len);
+void __hw_addr_flush(struct netdev_hw_addr_list *list);
+void __hw_addr_init(struct netdev_hw_addr_list *list);
 
 /* Functions used for device addresses handling */
-extern int dev_addr_add(struct net_device *dev, const unsigned char *addr,
-                       unsigned char addr_type);
-extern int dev_addr_del(struct net_device *dev, const unsigned char *addr,
-                       unsigned char addr_type);
-extern int dev_addr_add_multiple(struct net_device *to_dev,
-                                struct net_device *from_dev,
-                                unsigned char addr_type);
-extern int dev_addr_del_multiple(struct net_device *to_dev,
-                                struct net_device *from_dev,
-                                unsigned char addr_type);
-extern void dev_addr_flush(struct net_device *dev);
-extern int dev_addr_init(struct net_device *dev);
+int dev_addr_add(struct net_device *dev, const unsigned char *addr,
+                unsigned char addr_type);
+int dev_addr_del(struct net_device *dev, const unsigned char *addr,
+                unsigned char addr_type);
+int dev_addr_add_multiple(struct net_device *to_dev,
+                         struct net_device *from_dev, unsigned char addr_type);
+int dev_addr_del_multiple(struct net_device *to_dev,
+                         struct net_device *from_dev, unsigned char addr_type);
+void dev_addr_flush(struct net_device *dev);
+int dev_addr_init(struct net_device *dev);
 
 /* Functions used for unicast addresses handling */
-extern int dev_uc_add(struct net_device *dev, const unsigned char *addr);
-extern int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
-extern int dev_uc_del(struct net_device *dev, const unsigned char *addr);
-extern int dev_uc_sync(struct net_device *to, struct net_device *from);
-extern int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
-extern void dev_uc_unsync(struct net_device *to, struct net_device *from);
-extern void dev_uc_flush(struct net_device *dev);
-extern void dev_uc_init(struct net_device *dev);
+int dev_uc_add(struct net_device *dev, const unsigned char *addr);
+int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
+int dev_uc_del(struct net_device *dev, const unsigned char *addr);
+int dev_uc_sync(struct net_device *to, struct net_device *from);
+int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
+void dev_uc_unsync(struct net_device *to, struct net_device *from);
+void dev_uc_flush(struct net_device *dev);
+void dev_uc_init(struct net_device *dev);
 
 /* Functions used for multicast addresses handling */
-extern int dev_mc_add(struct net_device *dev, const unsigned char *addr);
-extern int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
-extern int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
-extern int dev_mc_del(struct net_device *dev, const unsigned char *addr);
-extern int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
-extern int dev_mc_sync(struct net_device *to, struct net_device *from);
-extern int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
-extern void dev_mc_unsync(struct net_device *to, struct net_device *from);
-extern void dev_mc_flush(struct net_device *dev);
-extern void dev_mc_init(struct net_device *dev);
+int dev_mc_add(struct net_device *dev, const unsigned char *addr);
+int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
+int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
+int dev_mc_del(struct net_device *dev, const unsigned char *addr);
+int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
+int dev_mc_sync(struct net_device *to, struct net_device *from);
+int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
+void dev_mc_unsync(struct net_device *to, struct net_device *from);
+void dev_mc_flush(struct net_device *dev);
+void dev_mc_init(struct net_device *dev);
 
 /* Functions used for secondary unicast and multicast support */
-extern void            dev_set_rx_mode(struct net_device *dev);
-extern void            __dev_set_rx_mode(struct net_device *dev);
-extern int             dev_set_promiscuity(struct net_device *dev, int inc);
-extern int             dev_set_allmulti(struct net_device *dev, int inc);
-extern void            netdev_state_change(struct net_device *dev);
-extern void            netdev_notify_peers(struct net_device *dev);
-extern void            netdev_features_change(struct net_device *dev);
+void dev_set_rx_mode(struct net_device *dev);
+void __dev_set_rx_mode(struct net_device *dev);
+int dev_set_promiscuity(struct net_device *dev, int inc);
+int dev_set_allmulti(struct net_device *dev, int inc);
+void netdev_state_change(struct net_device *dev);
+void netdev_notify_peers(struct net_device *dev);
+void netdev_features_change(struct net_device *dev);
 /* Load a device via the kmod */
-extern void            dev_load(struct net *net, const char *name);
-extern struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
-                                              struct rtnl_link_stats64 *storage);
-extern void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
-                                   const struct net_device_stats *netdev_stats);
+void dev_load(struct net *net, const char *name);
+struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
+                                       struct rtnl_link_stats64 *storage);
+void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
+                            const struct net_device_stats *netdev_stats);
 
 extern int             netdev_max_backlog;
 extern int             netdev_tstamp_prequeue;
 extern int             weight_p;
 extern int             bpf_jit_enable;
 
-extern bool netdev_has_upper_dev(struct net_device *dev,
-                                struct net_device *upper_dev);
-extern bool netdev_has_any_upper_dev(struct net_device *dev);
-extern struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
-                                                       struct list_head **iter);
+bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
+bool netdev_has_any_upper_dev(struct net_device *dev);
+struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
+                                                    struct list_head **iter);
 
 /* iterate through upper list, must be called under RCU read lock */
-#define netdev_for_each_upper_dev_rcu(dev, upper, iter) \
-       for (iter = &(dev)->upper_dev_list, \
-            upper = netdev_upper_get_next_dev_rcu(dev, &(iter)); \
-            upper; \
-            upper = netdev_upper_get_next_dev_rcu(dev, &(iter)))
-
-extern struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
-extern struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
-extern int netdev_upper_dev_link(struct net_device *dev,
+#define netdev_for_each_all_upper_dev_rcu(dev, updev, iter) \
+       for (iter = &(dev)->all_adj_list.upper, \
+            updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)); \
+            updev; \
+            updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)))
+
+void *netdev_lower_get_next_private(struct net_device *dev,
+                                   struct list_head **iter);
+void *netdev_lower_get_next_private_rcu(struct net_device *dev,
+                                       struct list_head **iter);
+
+#define netdev_for_each_lower_private(dev, priv, iter) \
+       for (iter = (dev)->adj_list.lower.next, \
+            priv = netdev_lower_get_next_private(dev, &(iter)); \
+            priv; \
+            priv = netdev_lower_get_next_private(dev, &(iter)))
+
+#define netdev_for_each_lower_private_rcu(dev, priv, iter) \
+       for (iter = &(dev)->adj_list.lower, \
+            priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \
+            priv; \
+            priv = netdev_lower_get_next_private_rcu(dev, &(iter)))
+
+void *netdev_adjacent_get_private(struct list_head *adj_list);
+struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
+struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
+int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev);
+int netdev_master_upper_dev_link(struct net_device *dev,
                                 struct net_device *upper_dev);
-extern int netdev_master_upper_dev_link(struct net_device *dev,
-                                       struct net_device *upper_dev);
-extern void netdev_upper_dev_unlink(struct net_device *dev,
-                                   struct net_device *upper_dev);
-extern int skb_checksum_help(struct sk_buff *skb);
-extern struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
-       netdev_features_t features, bool tx_path);
-extern struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
-                                         netdev_features_t features);
+int netdev_master_upper_dev_link_private(struct net_device *dev,
+                                        struct net_device *upper_dev,
+                                        void *private);
+void netdev_upper_dev_unlink(struct net_device *dev,
+                            struct net_device *upper_dev);
+void *netdev_lower_dev_get_private_rcu(struct net_device *dev,
+                                      struct net_device *lower_dev);
+void *netdev_lower_dev_get_private(struct net_device *dev,
+                                  struct net_device *lower_dev);
+int skb_checksum_help(struct sk_buff *skb);
+struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
+                                 netdev_features_t features, bool tx_path);
+struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
+                                   netdev_features_t features);
 
 static inline
 struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
@@ -2857,30 +2877,30 @@ static inline bool can_checksum_protocol(netdev_features_t features,
 }
 
 #ifdef CONFIG_BUG
-extern void netdev_rx_csum_fault(struct net_device *dev);
+void netdev_rx_csum_fault(struct net_device *dev);
 #else
 static inline void netdev_rx_csum_fault(struct net_device *dev)
 {
 }
 #endif
 /* rx skb timestamps */
-extern void            net_enable_timestamp(void);
-extern void            net_disable_timestamp(void);
+void net_enable_timestamp(void);
+void net_disable_timestamp(void);
 
 #ifdef CONFIG_PROC_FS
-extern int __init dev_proc_init(void);
+int __init dev_proc_init(void);
 #else
 #define dev_proc_init() 0
 #endif
 
-extern int netdev_class_create_file(struct class_attribute *class_attr);
-extern void netdev_class_remove_file(struct class_attribute *class_attr);
+int netdev_class_create_file(struct class_attribute *class_attr);
+void netdev_class_remove_file(struct class_attribute *class_attr);
 
 extern struct kobj_ns_type_operations net_ns_type_operations;
 
-extern const char *netdev_drivername(const struct net_device *dev);
+const char *netdev_drivername(const struct net_device *dev);
 
-extern void linkwatch_run_queue(void);
+void linkwatch_run_queue(void);
 
 static inline netdev_features_t netdev_get_wanted_features(
        struct net_device *dev)
@@ -2972,22 +2992,22 @@ static inline const char *netdev_name(const struct net_device *dev)
        return dev->name;
 }
 
-extern __printf(3, 4)
+__printf(3, 4)
 int netdev_printk(const char *level, const struct net_device *dev,
                  const char *format, ...);
-extern __printf(2, 3)
+__printf(2, 3)
 int netdev_emerg(const struct net_device *dev, const char *format, ...);
-extern __printf(2, 3)
+__printf(2, 3)
 int netdev_alert(const struct net_device *dev, const char *format, ...);
-extern __printf(2, 3)
+__printf(2, 3)
 int netdev_crit(const struct net_device *dev, const char *format, ...);
-extern __printf(2, 3)
+__printf(2, 3)
 int netdev_err(const struct net_device *dev, const char *format, ...);
-extern __printf(2, 3)
+__printf(2, 3)
 int netdev_warn(const struct net_device *dev, const char *format, ...);
-extern __printf(2, 3)
+__printf(2, 3)
 int netdev_notice(const struct net_device *dev, const char *format, ...);
-extern __printf(2, 3)
+__printf(2, 3)
 int netdev_info(const struct net_device *dev, const char *format, ...);
 
 #define MODULE_ALIAS_NETDEV(device) \
index 708fe72ab913ba71e5421b080d0c9a8c90c7a3d9..61223c52414f0aea4b9b3f58cee7a1a6da7d5987 100644
@@ -35,7 +35,7 @@ static inline void nf_inet_addr_mask(const union nf_inet_addr *a1,
        result->all[3] = a1->all[3] & mask->all[3];
 }
 
-extern int netfilter_init(void);
+int netfilter_init(void);
 
 /* Largest hook number + 1 */
 #define NF_MAX_HOOKS 8
@@ -208,7 +208,7 @@ int compat_nf_getsockopt(struct sock *sk, u_int8_t pf, int optval,
 /* Call this before modifying an existing packet: ensures it is
    modifiable and linear to the point you care about (writable_len).
    Returns true or false. */
-extern int skb_make_writable(struct sk_buff *skb, unsigned int writable_len);
+int skb_make_writable(struct sk_buff *skb, unsigned int writable_len);
 
 struct flowi;
 struct nf_queue_entry;
@@ -269,8 +269,8 @@ nf_checksum_partial(struct sk_buff *skb, unsigned int hook,
        return csum;
 }
 
-extern int nf_register_afinfo(const struct nf_afinfo *afinfo);
-extern void nf_unregister_afinfo(const struct nf_afinfo *afinfo);
+int nf_register_afinfo(const struct nf_afinfo *afinfo);
+void nf_unregister_afinfo(const struct nf_afinfo *afinfo);
 
 #include <net/flow.h>
 extern void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *);
@@ -315,7 +315,7 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
 
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu;
-extern void nf_ct_attach(struct sk_buff *, const struct sk_buff *);
+void nf_ct_attach(struct sk_buff *, const struct sk_buff *);
 extern void (*nf_ct_destroy)(struct nf_conntrack *) __rcu;
 
 struct nf_conn;
index 127d0b90604fa08486a88437fbd9a8e09b315148..275505792664ae4e835401a0503d5d3d9fa66808 100644
@@ -23,6 +23,6 @@ struct ip_conntrack_stat {
 };
 
 /* call to create an explicit dependency on nf_conntrack. */
-extern void need_conntrack(void);
+void need_conntrack(void);
 
 #endif /* _NF_CONNTRACK_COMMON_H */
index f381020eee92835fa68ae19858e1a4ca0437138c..858d9b214053ff141e5e856ce98edbf50412bc00 100644
@@ -29,13 +29,13 @@ struct nf_ct_h323_master {
 
 struct nf_conn;
 
-extern int get_h225_addr(struct nf_conn *ct, unsigned char *data,
-                        TransportAddress *taddr,
-                        union nf_inet_addr *addr, __be16 *port);
-extern void nf_conntrack_h245_expect(struct nf_conn *new,
-                                    struct nf_conntrack_expect *this);
-extern void nf_conntrack_q931_expect(struct nf_conn *new,
-                                    struct nf_conntrack_expect *this);
+int get_h225_addr(struct nf_conn *ct, unsigned char *data,
+                 TransportAddress *taddr, union nf_inet_addr *addr,
+                 __be16 *port);
+void nf_conntrack_h245_expect(struct nf_conn *new,
+                             struct nf_conntrack_expect *this);
+void nf_conntrack_q931_expect(struct nf_conn *new,
+                             struct nf_conntrack_expect *this);
 extern int (*set_h245_addr_hook) (struct sk_buff *skb, unsigned int protoff,
                                  unsigned char **data, int dataoff,
                                  H245_TransportAddress *taddr,
index 6a0664c0c45197a7abece81ecb35fae6e638d8ae..ec2ffaf418c8e8d31e1cca95fea02a85aa48089f 100644
@@ -87,8 +87,8 @@ int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir,
 /* delete keymap entries */
 void nf_ct_gre_keymap_destroy(struct nf_conn *ct);
 
-extern void nf_ct_gre_keymap_flush(struct net *net);
-extern void nf_nat_need_gre(void);
+void nf_ct_gre_keymap_flush(struct net *net);
+void nf_nat_need_gre(void);
 
 #endif /* __KERNEL__ */
 #endif /* _CONNTRACK_PROTO_GRE_H */
index 4cb71551f6116280ffabe3e6ced39f9c3d491306..d5af3c27fb7de0385b11396ac241991bacb3684e 100644
@@ -166,35 +166,34 @@ struct nf_nat_sip_hooks {
 };
 extern const struct nf_nat_sip_hooks *nf_nat_sip_hooks;
 
-extern int ct_sip_parse_request(const struct nf_conn *ct,
-                               const char *dptr, unsigned int datalen,
-                               unsigned int *matchoff, unsigned int *matchlen,
-                               union nf_inet_addr *addr, __be16 *port);
-extern int ct_sip_get_header(const struct nf_conn *ct, const char *dptr,
-                            unsigned int dataoff, unsigned int datalen,
-                            enum sip_header_types type,
-                            unsigned int *matchoff, unsigned int *matchlen);
-extern int ct_sip_parse_header_uri(const struct nf_conn *ct, const char *dptr,
-                                  unsigned int *dataoff, unsigned int datalen,
-                                  enum sip_header_types type, int *in_header,
-                                  unsigned int *matchoff, unsigned int *matchlen,
-                                  union nf_inet_addr *addr, __be16 *port);
-extern int ct_sip_parse_address_param(const struct nf_conn *ct, const char *dptr,
-                                     unsigned int dataoff, unsigned int datalen,
-                                     const char *name,
-                                     unsigned int *matchoff, unsigned int *matchlen,
-                                     union nf_inet_addr *addr, bool delim);
-extern int ct_sip_parse_numerical_param(const struct nf_conn *ct, const char *dptr,
-                                       unsigned int off, unsigned int datalen,
-                                       const char *name,
-                                       unsigned int *matchoff, unsigned int *matchen,
-                                       unsigned int *val);
-
-extern int ct_sip_get_sdp_header(const struct nf_conn *ct, const char *dptr,
-                                unsigned int dataoff, unsigned int datalen,
-                                enum sdp_header_types type,
-                                enum sdp_header_types term,
-                                unsigned int *matchoff, unsigned int *matchlen);
+int ct_sip_parse_request(const struct nf_conn *ct, const char *dptr,
+                        unsigned int datalen, unsigned int *matchoff,
+                        unsigned int *matchlen, union nf_inet_addr *addr,
+                        __be16 *port);
+int ct_sip_get_header(const struct nf_conn *ct, const char *dptr,
+                     unsigned int dataoff, unsigned int datalen,
+                     enum sip_header_types type, unsigned int *matchoff,
+                     unsigned int *matchlen);
+int ct_sip_parse_header_uri(const struct nf_conn *ct, const char *dptr,
+                           unsigned int *dataoff, unsigned int datalen,
+                           enum sip_header_types type, int *in_header,
+                           unsigned int *matchoff, unsigned int *matchlen,
+                           union nf_inet_addr *addr, __be16 *port);
+int ct_sip_parse_address_param(const struct nf_conn *ct, const char *dptr,
+                              unsigned int dataoff, unsigned int datalen,
+                              const char *name, unsigned int *matchoff,
+                              unsigned int *matchlen, union nf_inet_addr *addr,
+                              bool delim);
+int ct_sip_parse_numerical_param(const struct nf_conn *ct, const char *dptr,
+                                unsigned int off, unsigned int datalen,
+                                const char *name, unsigned int *matchoff,
+                                unsigned int *matchen, unsigned int *val);
+
+int ct_sip_get_sdp_header(const struct nf_conn *ct, const char *dptr,
+                         unsigned int dataoff, unsigned int datalen,
+                         enum sdp_header_types type,
+                         enum sdp_header_types term,
+                         unsigned int *matchoff, unsigned int *matchlen);
 
 #endif /* __KERNEL__ */
 #endif /* __NF_CONNTRACK_SIP_H__ */
index cadb7402d7a713fdcedbe4b3cb77c6095854f3ff..4f68cd7141d24ee478bc9f3165c30f4e5f7e675c 100644 (file)
@@ -25,20 +25,20 @@ struct nfnetlink_subsystem {
        const struct nfnl_callback *cb; /* callback for individual types */
 };
 
-extern int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n);
-extern int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n);
-
-extern int nfnetlink_has_listeners(struct net *net, unsigned int group);
-extern struct sk_buff *nfnetlink_alloc_skb(struct net *net, unsigned int size,
-                                          u32 dst_portid, gfp_t gfp_mask);
-extern int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 portid,
-                         unsigned int group, int echo, gfp_t flags);
-extern int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error);
-extern int nfnetlink_unicast(struct sk_buff *skb, struct net *net,
-                            u32 portid, int flags);
-
-extern void nfnl_lock(__u8 subsys_id);
-extern void nfnl_unlock(__u8 subsys_id);
+int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n);
+int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n);
+
+int nfnetlink_has_listeners(struct net *net, unsigned int group);
+struct sk_buff *nfnetlink_alloc_skb(struct net *net, unsigned int size,
+                                   u32 dst_portid, gfp_t gfp_mask);
+int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 portid,
+                  unsigned int group, int echo, gfp_t flags);
+int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error);
+int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid,
+                     int flags);
+
+void nfnl_lock(__u8 subsys_id);
+void nfnl_unlock(__u8 subsys_id);
 
 #define MODULE_ALIAS_NFNL_SUBSYS(subsys) \
        MODULE_ALIAS("nfnetlink-subsys-" __stringify(subsys))
index bb4bbc9b7a18c4834724eabe574b76670806c8c4..b2e85e59f76085cb0e56294c78b63d3a04a0762a 100644 (file)
@@ -6,8 +6,8 @@
 
 struct nf_acct;
 
-extern struct nf_acct *nfnl_acct_find_get(const char *filter_name);
-extern void nfnl_acct_put(struct nf_acct *acct);
-extern void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct);
+struct nf_acct *nfnl_acct_find_get(const char *filter_name);
+void nfnl_acct_put(struct nf_acct *acct);
+void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct);
 
 #endif /* _NFNL_ACCT_H */
index dd49566315c616f1d4db4cba47ee1dc9ecaefa26..a3e215bb0241d47379bce4ff6e81ba2b4995d3c3 100644 (file)
@@ -229,50 +229,48 @@ struct xt_table_info {
 
 #define XT_TABLE_INFO_SZ (offsetof(struct xt_table_info, entries) \
                          + nr_cpu_ids * sizeof(char *))
-extern int xt_register_target(struct xt_target *target);
-extern void xt_unregister_target(struct xt_target *target);
-extern int xt_register_targets(struct xt_target *target, unsigned int n);
-extern void xt_unregister_targets(struct xt_target *target, unsigned int n);
-
-extern int xt_register_match(struct xt_match *target);
-extern void xt_unregister_match(struct xt_match *target);
-extern int xt_register_matches(struct xt_match *match, unsigned int n);
-extern void xt_unregister_matches(struct xt_match *match, unsigned int n);
-
-extern int xt_check_match(struct xt_mtchk_param *,
-                         unsigned int size, u_int8_t proto, bool inv_proto);
-extern int xt_check_target(struct xt_tgchk_param *,
-                          unsigned int size, u_int8_t proto, bool inv_proto);
-
-extern struct xt_table *xt_register_table(struct net *net,
-                                         const struct xt_table *table,
-                                         struct xt_table_info *bootstrap,
-                                         struct xt_table_info *newinfo);
-extern void *xt_unregister_table(struct xt_table *table);
-
-extern struct xt_table_info *xt_replace_table(struct xt_table *table,
-                                             unsigned int num_counters,
-                                             struct xt_table_info *newinfo,
-                                             int *error);
-
-extern struct xt_match *xt_find_match(u8 af, const char *name, u8 revision);
-extern struct xt_target *xt_find_target(u8 af, const char *name, u8 revision);
-extern struct xt_match *xt_request_find_match(u8 af, const char *name,
-                                             u8 revision);
-extern struct xt_target *xt_request_find_target(u8 af, const char *name,
-                                               u8 revision);
-extern int xt_find_revision(u8 af, const char *name, u8 revision,
-                           int target, int *err);
-
-extern struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
-                                          const char *name);
-extern void xt_table_unlock(struct xt_table *t);
-
-extern int xt_proto_init(struct net *net, u_int8_t af);
-extern void xt_proto_fini(struct net *net, u_int8_t af);
-
-extern struct xt_table_info *xt_alloc_table_info(unsigned int size);
-extern void xt_free_table_info(struct xt_table_info *info);
+int xt_register_target(struct xt_target *target);
+void xt_unregister_target(struct xt_target *target);
+int xt_register_targets(struct xt_target *target, unsigned int n);
+void xt_unregister_targets(struct xt_target *target, unsigned int n);
+
+int xt_register_match(struct xt_match *target);
+void xt_unregister_match(struct xt_match *target);
+int xt_register_matches(struct xt_match *match, unsigned int n);
+void xt_unregister_matches(struct xt_match *match, unsigned int n);
+
+int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto,
+                  bool inv_proto);
+int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto,
+                   bool inv_proto);
+
+struct xt_table *xt_register_table(struct net *net,
+                                  const struct xt_table *table,
+                                  struct xt_table_info *bootstrap,
+                                  struct xt_table_info *newinfo);
+void *xt_unregister_table(struct xt_table *table);
+
+struct xt_table_info *xt_replace_table(struct xt_table *table,
+                                      unsigned int num_counters,
+                                      struct xt_table_info *newinfo,
+                                      int *error);
+
+struct xt_match *xt_find_match(u8 af, const char *name, u8 revision);
+struct xt_target *xt_find_target(u8 af, const char *name, u8 revision);
+struct xt_match *xt_request_find_match(u8 af, const char *name, u8 revision);
+struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision);
+int xt_find_revision(u8 af, const char *name, u8 revision, int target,
+                    int *err);
+
+struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
+                                   const char *name);
+void xt_table_unlock(struct xt_table *t);
+
+int xt_proto_init(struct net *net, u_int8_t af);
+void xt_proto_fini(struct net *net, u_int8_t af);
+
+struct xt_table_info *xt_alloc_table_info(unsigned int size);
+void xt_free_table_info(struct xt_table_info *info);
 
 /**
  * xt_recseq - recursive seqcount for netfilter use
@@ -353,8 +351,8 @@ static inline unsigned long ifname_compare_aligned(const char *_a,
        return ret;
 }
 
-extern struct nf_hook_ops *xt_hook_link(const struct xt_table *, nf_hookfn *);
-extern void xt_hook_unlink(const struct xt_table *, struct nf_hook_ops *);
+struct nf_hook_ops *xt_hook_link(const struct xt_table *, nf_hookfn *);
+void xt_hook_unlink(const struct xt_table *, struct nf_hook_ops *);
 
 #ifdef CONFIG_COMPAT
 #include <net/compat.h>
@@ -414,25 +412,25 @@ struct _compat_xt_align {
 
 #define COMPAT_XT_ALIGN(s) __ALIGN_KERNEL((s), __alignof__(struct _compat_xt_align))
 
-extern void xt_compat_lock(u_int8_t af);
-extern void xt_compat_unlock(u_int8_t af);
-
-extern int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta);
-extern void xt_compat_flush_offsets(u_int8_t af);
-extern void xt_compat_init_offsets(u_int8_t af, unsigned int number);
-extern int xt_compat_calc_jump(u_int8_t af, unsigned int offset);
-
-extern int xt_compat_match_offset(const struct xt_match *match);
-extern int xt_compat_match_from_user(struct xt_entry_match *m,
-                                    void **dstptr, unsigned int *size);
-extern int xt_compat_match_to_user(const struct xt_entry_match *m,
-                                  void __user **dstptr, unsigned int *size);
-
-extern int xt_compat_target_offset(const struct xt_target *target);
-extern void xt_compat_target_from_user(struct xt_entry_target *t,
-                                      void **dstptr, unsigned int *size);
-extern int xt_compat_target_to_user(const struct xt_entry_target *t,
-                                   void __user **dstptr, unsigned int *size);
+void xt_compat_lock(u_int8_t af);
+void xt_compat_unlock(u_int8_t af);
+
+int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta);
+void xt_compat_flush_offsets(u_int8_t af);
+void xt_compat_init_offsets(u_int8_t af, unsigned int number);
+int xt_compat_calc_jump(u_int8_t af, unsigned int offset);
+
+int xt_compat_match_offset(const struct xt_match *match);
+int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
+                             unsigned int *size);
+int xt_compat_match_to_user(const struct xt_entry_match *m,
+                           void __user **dstptr, unsigned int *size);
+
+int xt_compat_target_offset(const struct xt_target *target);
+void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
+                               unsigned int *size);
+int xt_compat_target_to_user(const struct xt_entry_target *t,
+                            void __user **dstptr, unsigned int *size);
 
 #endif /* CONFIG_COMPAT */
 #endif /* _X_TABLES_H */
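
The pattern repeated across these header hunks is purely mechanical: on a file-scope function prototype the extern keyword is redundant, so dropping it (and re-wrapping the arguments) changes no semantics. A minimal illustration using one of the x_tables declarations above; both lines are legal together and declare the same symbol:

/* At file scope a function declaration has external linkage whether or
 * not "extern" is spelled out, so removing the keyword is cosmetic. */
extern int xt_register_target(struct xt_target *target);	/* old style */
int xt_register_target(struct xt_target *target);		/* new style */
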
index dfb4d9e52bcb3a1775eebd627d1d758aaaed6c4b..8ab1c278b66da77229647e08ef8e0d948d4e6122 100644 (file)
@@ -25,7 +25,7 @@ enum nf_br_hook_priorities {
 #define BRNF_PPPoE                     0x20
 
 /* Only used in br_forward.c */
-extern int nf_bridge_copy_header(struct sk_buff *skb);
+int nf_bridge_copy_header(struct sk_buff *skb);
 static inline int nf_bridge_maybe_copy_header(struct sk_buff *skb)
 {
        if (skb->nf_bridge &&
@@ -53,7 +53,7 @@ static inline unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
        return 0;
 }
 
-extern int br_handle_frame_finish(struct sk_buff *skb);
+int br_handle_frame_finish(struct sk_buff *skb);
 /* Only used in br_device.c */
 static inline int br_nf_pre_routing_finish_bridge_slow(struct sk_buff *skb)
 {
index dfaf116b3e8125a3f36511bfe35b64fc81d463e3..6e4591bb54d495d2f4ee3f82058305be7539d025 100644 (file)
@@ -6,7 +6,7 @@
 
 #include <uapi/linux/netfilter_ipv4.h>
 
-extern int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type);
-extern __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
-                                  unsigned int dataoff, u_int8_t protocol);
+int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type);
+__sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
+                      unsigned int dataoff, u_int8_t protocol);
 #endif /*__LINUX_IP_NETFILTER_H*/
index 2d4df6ce043efab2f9017bc5359b90846f085fb7..64dad1cc1a4bc86d391e8e9cbffea5091b9bfa53 100644 (file)
 
 
 #ifdef CONFIG_NETFILTER
-extern int ip6_route_me_harder(struct sk_buff *skb);
-extern __sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
-                                   unsigned int dataoff, u_int8_t protocol);
+int ip6_route_me_harder(struct sk_buff *skb);
+__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
+                       unsigned int dataoff, u_int8_t protocol);
 
-extern int ipv6_netfilter_init(void);
-extern void ipv6_netfilter_fini(void);
+int ipv6_netfilter_init(void);
+void ipv6_netfilter_fini(void);
 
 /*
  * Hook functions for ipv6 to allow xt_* modules to be built-in even
index 01fd84b566f773505122f235ceb3f70ec158bb04..49f52c8f4422ffd8ce6f0e17d33e960e92361076 100644 (file)
@@ -1455,7 +1455,8 @@ struct nfs_rpc_ops {
        struct inode * (*open_context) (struct inode *dir,
                                struct nfs_open_context *ctx,
                                int open_flags,
-                               struct iattr *iattr);
+                               struct iattr *iattr,
+                               int *);
        int (*have_delegation)(struct inode *, fmode_t);
        int (*return_delegation)(struct inode *);
        struct nfs_client *(*alloc_client) (const struct nfs_client_initdata *);
index 535cecf1e02f7823a1ec3f416c3b06382ea6d8fa..fcd63baee5f28b3361625cc1877b9a275d221204 100644 (file)
@@ -1,8 +1,6 @@
 #ifndef __OF_IRQ_H
 #define __OF_IRQ_H
 
-#if defined(CONFIG_OF)
-struct of_irq;
 #include <linux/types.h>
 #include <linux/errno.h>
 #include <linux/irq.h>
@@ -10,14 +8,6 @@ struct of_irq;
 #include <linux/ioport.h>
 #include <linux/of.h>
 
-/*
- * irq_of_parse_and_map() is used by all OF enabled platforms; but SPARC
- * implements it differently.  However, the prototype is the same for all,
- * so declare it here regardless of the CONFIG_OF_IRQ setting.
- */
-extern unsigned int irq_of_parse_and_map(struct device_node *node, int index);
-
-#if defined(CONFIG_OF_IRQ)
 /**
  * of_irq - container for device_node/irq_specifier pair for an irq controller
  * @controller: pointer to interrupt controller device tree node
@@ -71,11 +61,17 @@ extern int of_irq_to_resource(struct device_node *dev, int index,
 extern int of_irq_count(struct device_node *dev);
 extern int of_irq_to_resource_table(struct device_node *dev,
                struct resource *res, int nr_irqs);
-extern struct device_node *of_irq_find_parent(struct device_node *child);
 
 extern void of_irq_init(const struct of_device_id *matches);
 
-#endif /* CONFIG_OF_IRQ */
+#if defined(CONFIG_OF)
+/*
+ * irq_of_parse_and_map() is used by all OF enabled platforms; but SPARC
+ * implements it differently.  However, the prototype is the same for all,
+ * so declare it here regardless of the CONFIG_OF_IRQ setting.
+ */
+extern unsigned int irq_of_parse_and_map(struct device_node *node, int index);
+extern struct device_node *of_irq_find_parent(struct device_node *child);
 
 #else /* !CONFIG_OF */
 static inline unsigned int irq_of_parse_and_map(struct device_node *dev,
index 67e13aa5a4781d2c8fdfa8fc902d2bbc90f2a3a0..9bdad43ad228a5501c2dc8b0e6cc77a6da8c7b30 100644 (file)
@@ -40,6 +40,8 @@ enum regulator_status {
 };
 
 /**
+ * struct regulator_linear_range - specify linear voltage ranges
+ *
  * Specify a range of voltages for regulator_map_linar_range() and
  * regulator_list_linear_range().
  *
index 2ddb48d9312c260e1e41a87ac25a19314f2aa379..1cd32f96055e4ca0f2072a30b6f8ba26fd28b65d 100644 (file)
@@ -498,7 +498,7 @@ struct sk_buff {
         * headers if needed
         */
        __u8                    encapsulation:1;
-       /* 7/9 bit hole (depending on ndisc_nodetype presence) */
+       /* 6/8 bit hole (depending on ndisc_nodetype presence) */
        kmemcheck_bitfield_end(flags2);
 
 #if defined CONFIG_NET_DMA || defined CONFIG_NET_RX_BUSY_POLL
@@ -585,8 +585,8 @@ static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
        skb->_skb_refdst = (unsigned long)dst;
 }
 
-extern void __skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst,
-                               bool force);
+void __skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst,
+                        bool force);
 
 /**
  * skb_dst_set_noref - sets skb dst, hopefully, without taking reference
@@ -634,20 +634,20 @@ static inline struct rtable *skb_rtable(const struct sk_buff *skb)
        return (struct rtable *)skb_dst(skb);
 }
 
-extern void kfree_skb(struct sk_buff *skb);
-extern void kfree_skb_list(struct sk_buff *segs);
-extern void skb_tx_error(struct sk_buff *skb);
-extern void consume_skb(struct sk_buff *skb);
-extern void           __kfree_skb(struct sk_buff *skb);
+void kfree_skb(struct sk_buff *skb);
+void kfree_skb_list(struct sk_buff *segs);
+void skb_tx_error(struct sk_buff *skb);
+void consume_skb(struct sk_buff *skb);
+void  __kfree_skb(struct sk_buff *skb);
 extern struct kmem_cache *skbuff_head_cache;
 
-extern void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
-extern bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
-                            bool *fragstolen, int *delta_truesize);
+void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
+bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
+                     bool *fragstolen, int *delta_truesize);
 
-extern struct sk_buff *__alloc_skb(unsigned int size,
-                                  gfp_t priority, int flags, int node);
-extern struct sk_buff *build_skb(void *data, unsigned int frag_size);
+struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
+                           int node);
+struct sk_buff *build_skb(void *data, unsigned int frag_size);
 static inline struct sk_buff *alloc_skb(unsigned int size,
                                        gfp_t priority)
 {
@@ -660,41 +660,33 @@ static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
        return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
 }
 
-extern struct sk_buff *__alloc_skb_head(gfp_t priority, int node);
+struct sk_buff *__alloc_skb_head(gfp_t priority, int node);
 static inline struct sk_buff *alloc_skb_head(gfp_t priority)
 {
        return __alloc_skb_head(priority, -1);
 }
 
-extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
-extern int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
-extern struct sk_buff *skb_clone(struct sk_buff *skb,
-                                gfp_t priority);
-extern struct sk_buff *skb_copy(const struct sk_buff *skb,
-                               gfp_t priority);
-extern struct sk_buff *__pskb_copy(struct sk_buff *skb,
-                                int headroom, gfp_t gfp_mask);
-
-extern int            pskb_expand_head(struct sk_buff *skb,
-                                       int nhead, int ntail,
-                                       gfp_t gfp_mask);
-extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
-                                           unsigned int headroom);
-extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
-                                      int newheadroom, int newtailroom,
-                                      gfp_t priority);
-extern int            skb_to_sgvec(struct sk_buff *skb,
-                                   struct scatterlist *sg, int offset,
-                                   int len);
-extern int            skb_cow_data(struct sk_buff *skb, int tailbits,
-                                   struct sk_buff **trailer);
-extern int            skb_pad(struct sk_buff *skb, int pad);
+struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
+int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
+struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority);
+struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority);
+struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask);
+
+int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask);
+struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
+                                    unsigned int headroom);
+struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
+                               int newtailroom, gfp_t priority);
+int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset,
+                int len);
+int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
+int skb_pad(struct sk_buff *skb, int pad);
 #define dev_kfree_skb(a)       consume_skb(a)
 
-extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
-                       int getfrag(void *from, char *to, int offset,
-                       int len,int odd, struct sk_buff *skb),
-                       void *from, int length);
+int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
+                           int getfrag(void *from, char *to, int offset,
+                                       int len, int odd, struct sk_buff *skb),
+                           void *from, int length);
 
 struct skb_seq_state {
        __u32           lower_offset;
@@ -706,18 +698,17 @@ struct skb_seq_state {
        __u8            *frag_data;
 };
 
-extern void          skb_prepare_seq_read(struct sk_buff *skb,
-                                          unsigned int from, unsigned int to,
-                                          struct skb_seq_state *st);
-extern unsigned int   skb_seq_read(unsigned int consumed, const u8 **data,
-                                  struct skb_seq_state *st);
-extern void          skb_abort_seq_read(struct skb_seq_state *st);
+void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
+                         unsigned int to, struct skb_seq_state *st);
+unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
+                         struct skb_seq_state *st);
+void skb_abort_seq_read(struct skb_seq_state *st);
 
-extern unsigned int   skb_find_text(struct sk_buff *skb, unsigned int from,
-                                   unsigned int to, struct ts_config *config,
-                                   struct ts_state *state);
+unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
+                          unsigned int to, struct ts_config *config,
+                          struct ts_state *state);
 
-extern void __skb_get_rxhash(struct sk_buff *skb);
+void __skb_get_rxhash(struct sk_buff *skb);
 static inline __u32 skb_get_rxhash(struct sk_buff *skb)
 {
        if (!skb->l4_rxhash)
@@ -1095,7 +1086,8 @@ static inline void skb_queue_head_init_class(struct sk_buff_head *list,
  *     The "__skb_xxxx()" functions are the non-atomic ones that
  *     can only be called with interrupts disabled.
  */
-extern void        skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
+void skb_insert(struct sk_buff *old, struct sk_buff *newsk,
+               struct sk_buff_head *list);
 static inline void __skb_insert(struct sk_buff *newsk,
                                struct sk_buff *prev, struct sk_buff *next,
                                struct sk_buff_head *list)
@@ -1201,8 +1193,8 @@ static inline void __skb_queue_after(struct sk_buff_head *list,
        __skb_insert(newsk, prev, prev->next, list);
 }
 
-extern void skb_append(struct sk_buff *old, struct sk_buff *newsk,
-                      struct sk_buff_head *list);
+void skb_append(struct sk_buff *old, struct sk_buff *newsk,
+               struct sk_buff_head *list);
 
 static inline void __skb_queue_before(struct sk_buff_head *list,
                                      struct sk_buff *next,
@@ -1221,7 +1213,7 @@ static inline void __skb_queue_before(struct sk_buff_head *list,
  *
  *     A buffer cannot be placed on two lists at the same time.
  */
-extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
+void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
 static inline void __skb_queue_head(struct sk_buff_head *list,
                                    struct sk_buff *newsk)
 {
@@ -1238,7 +1230,7 @@ static inline void __skb_queue_head(struct sk_buff_head *list,
  *
  *     A buffer cannot be placed on two lists at the same time.
  */
-extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
+void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
 static inline void __skb_queue_tail(struct sk_buff_head *list,
                                   struct sk_buff *newsk)
 {
@@ -1249,7 +1241,7 @@ static inline void __skb_queue_tail(struct sk_buff_head *list,
  * remove sk_buff from list. _Must_ be called atomically, and with
  * the list known..
  */
-extern void       skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
+void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
 static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
 {
        struct sk_buff *next, *prev;
@@ -1270,7 +1262,7 @@ static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
  *     so must be used with appropriate locks held only. The head item is
  *     returned or %NULL if the list is empty.
  */
-extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
+struct sk_buff *skb_dequeue(struct sk_buff_head *list);
 static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
 {
        struct sk_buff *skb = skb_peek(list);
@@ -1287,7 +1279,7 @@ static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
  *     so must be used with appropriate locks held only. The tail item is
  *     returned or %NULL if the list is empty.
  */
-extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
+struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
 static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
 {
        struct sk_buff *skb = skb_peek_tail(list);
@@ -1373,8 +1365,8 @@ static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
        skb_shinfo(skb)->nr_frags = i + 1;
 }
 
-extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
-                           int off, int size, unsigned int truesize);
+void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
+                    int size, unsigned int truesize);
 
 #define SKB_PAGE_ASSERT(skb)   BUG_ON(skb_shinfo(skb)->nr_frags)
 #define SKB_FRAG_ASSERT(skb)   BUG_ON(skb_has_frag_list(skb))
@@ -1418,7 +1410,7 @@ static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
 /*
  *     Add data to an sk_buff
  */
-extern unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
+unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
 static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
 {
        unsigned char *tmp = skb_tail_pointer(skb);
@@ -1428,7 +1420,7 @@ static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
        return tmp;
 }
 
-extern unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
+unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
 static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
 {
        skb->data -= len;
@@ -1436,7 +1428,7 @@ static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
        return skb->data;
 }
 
-extern unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
+unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
 static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
 {
        skb->len -= len;
@@ -1449,7 +1441,7 @@ static inline unsigned char *skb_pull_inline(struct sk_buff *skb, unsigned int l
        return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
 }
 
-extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);
+unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);
 
 static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
 {
@@ -1753,7 +1745,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
 #define NET_SKB_PAD    max(32, L1_CACHE_BYTES)
 #endif
 
-extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
+int ___pskb_trim(struct sk_buff *skb, unsigned int len);
 
 static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
 {
@@ -1765,7 +1757,7 @@ static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
        skb_set_tail_pointer(skb, len);
 }
 
-extern void skb_trim(struct sk_buff *skb, unsigned int len);
+void skb_trim(struct sk_buff *skb, unsigned int len);
 
 static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
 {
@@ -1838,7 +1830,7 @@ static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
  *     the list and one reference dropped. This function does not take the
  *     list lock and the caller must hold the relevant locks to use it.
  */
-extern void skb_queue_purge(struct sk_buff_head *list);
+void skb_queue_purge(struct sk_buff_head *list);
 static inline void __skb_queue_purge(struct sk_buff_head *list)
 {
        struct sk_buff *skb;
@@ -1850,11 +1842,10 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
 #define NETDEV_FRAG_PAGE_MAX_SIZE  (PAGE_SIZE << NETDEV_FRAG_PAGE_MAX_ORDER)
 #define NETDEV_PAGECNT_MAX_BIAS           NETDEV_FRAG_PAGE_MAX_SIZE
 
-extern void *netdev_alloc_frag(unsigned int fragsz);
+void *netdev_alloc_frag(unsigned int fragsz);
 
-extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
-                                         unsigned int length,
-                                         gfp_t gfp_mask);
+struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
+                                  gfp_t gfp_mask);
 
 /**
  *     netdev_alloc_skb - allocate an skbuff for rx on a specific device
@@ -2342,60 +2333,42 @@ static inline void skb_frag_add_head(struct sk_buff *skb, struct sk_buff *frag)
 #define skb_walk_frags(skb, iter)      \
        for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
 
-extern struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
-                                          int *peeked, int *off, int *err);
-extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
-                                        int noblock, int *err);
-extern unsigned int    datagram_poll(struct file *file, struct socket *sock,
-                                    struct poll_table_struct *wait);
-extern int            skb_copy_datagram_iovec(const struct sk_buff *from,
-                                              int offset, struct iovec *to,
-                                              int size);
-extern int            skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
-                                                       int hlen,
-                                                       struct iovec *iov);
-extern int            skb_copy_datagram_from_iovec(struct sk_buff *skb,
-                                                   int offset,
-                                                   const struct iovec *from,
-                                                   int from_offset,
-                                                   int len);
-extern int            zerocopy_sg_from_iovec(struct sk_buff *skb,
-                                             const struct iovec *frm,
-                                             int offset,
-                                             size_t count);
-extern int            skb_copy_datagram_const_iovec(const struct sk_buff *from,
-                                                    int offset,
-                                                    const struct iovec *to,
-                                                    int to_offset,
-                                                    int size);
-extern void           skb_free_datagram(struct sock *sk, struct sk_buff *skb);
-extern void           skb_free_datagram_locked(struct sock *sk,
-                                               struct sk_buff *skb);
-extern int            skb_kill_datagram(struct sock *sk, struct sk_buff *skb,
-                                        unsigned int flags);
-extern __wsum         skb_checksum(const struct sk_buff *skb, int offset,
-                                   int len, __wsum csum);
-extern int            skb_copy_bits(const struct sk_buff *skb, int offset,
-                                    void *to, int len);
-extern int            skb_store_bits(struct sk_buff *skb, int offset,
-                                     const void *from, int len);
-extern __wsum         skb_copy_and_csum_bits(const struct sk_buff *skb,
-                                             int offset, u8 *to, int len,
-                                             __wsum csum);
-extern int             skb_splice_bits(struct sk_buff *skb,
-                                               unsigned int offset,
-                                               struct pipe_inode_info *pipe,
-                                               unsigned int len,
-                                               unsigned int flags);
-extern void           skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
-extern void           skb_split(struct sk_buff *skb,
-                                struct sk_buff *skb1, const u32 len);
-extern int            skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
-                                int shiftlen);
-extern void           skb_scrub_packet(struct sk_buff *skb, bool xnet);
-
-extern struct sk_buff *skb_segment(struct sk_buff *skb,
-                                  netdev_features_t features);
+struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
+                                   int *peeked, int *off, int *err);
+struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
+                                 int *err);
+unsigned int datagram_poll(struct file *file, struct socket *sock,
+                          struct poll_table_struct *wait);
+int skb_copy_datagram_iovec(const struct sk_buff *from, int offset,
+                           struct iovec *to, int size);
+int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, int hlen,
+                                    struct iovec *iov);
+int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
+                                const struct iovec *from, int from_offset,
+                                int len);
+int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *frm,
+                          int offset, size_t count);
+int skb_copy_datagram_const_iovec(const struct sk_buff *from, int offset,
+                                 const struct iovec *to, int to_offset,
+                                 int size);
+void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
+void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb);
+int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
+__wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
+                   __wsum csum);
+int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
+int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
+__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
+                             int len, __wsum csum);
+int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
+                   struct pipe_inode_info *pipe, unsigned int len,
+                   unsigned int flags);
+void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
+void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
+int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
+void skb_scrub_packet(struct sk_buff *skb, bool xnet);
+
+struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
 
 static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
                                       int len, void *buffer)
@@ -2440,7 +2413,7 @@ static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
        memcpy(skb->data + offset, from, len);
 }
 
-extern void skb_init(void);
+void skb_init(void);
 
 static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
 {
@@ -2483,12 +2456,12 @@ static inline ktime_t net_invalid_timestamp(void)
        return ktime_set(0, 0);
 }
 
-extern void skb_timestamping_init(void);
+void skb_timestamping_init(void);
 
 #ifdef CONFIG_NETWORK_PHY_TIMESTAMPING
 
-extern void skb_clone_tx_timestamp(struct sk_buff *skb);
-extern bool skb_defer_rx_timestamp(struct sk_buff *skb);
+void skb_clone_tx_timestamp(struct sk_buff *skb);
+bool skb_defer_rx_timestamp(struct sk_buff *skb);
 
 #else /* CONFIG_NETWORK_PHY_TIMESTAMPING */
 
@@ -2529,8 +2502,8 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
  * generates a software time stamp (otherwise), then queues the clone
  * to the error queue of the socket.  Errors are silently ignored.
  */
-extern void skb_tstamp_tx(struct sk_buff *orig_skb,
-                       struct skb_shared_hwtstamps *hwtstamps);
+void skb_tstamp_tx(struct sk_buff *orig_skb,
+                  struct skb_shared_hwtstamps *hwtstamps);
 
 static inline void sw_tx_timestamp(struct sk_buff *skb)
 {
@@ -2562,8 +2535,8 @@ static inline void skb_tx_timestamp(struct sk_buff *skb)
  */
 void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);
 
-extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
-extern __sum16 __skb_checksum_complete(struct sk_buff *skb);
+__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
+__sum16 __skb_checksum_complete(struct sk_buff *skb);
 
 static inline int skb_csum_unnecessary(const struct sk_buff *skb)
 {
@@ -2593,7 +2566,7 @@ static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
 }
 
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
-extern void nf_conntrack_destroy(struct nf_conntrack *nfct);
+void nf_conntrack_destroy(struct nf_conntrack *nfct);
 static inline void nf_conntrack_put(struct nf_conntrack *nfct)
 {
        if (nfct && atomic_dec_and_test(&nfct->use))
@@ -2732,21 +2705,17 @@ static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
        return skb->queue_mapping != 0;
 }
 
-extern u16 __skb_tx_hash(const struct net_device *dev,
-                        const struct sk_buff *skb,
-                        unsigned int num_tx_queues);
+u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
+                 unsigned int num_tx_queues);
 
-#ifdef CONFIG_XFRM
 static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
 {
+#ifdef CONFIG_XFRM
        return skb->sp;
-}
 #else
-static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
-{
        return NULL;
-}
 #endif
+}
 
 /* Keeps track of mac header offset relative to skb->head.
  * It is useful for TSO of Tunneling protocol. e.g. GRE.
@@ -2783,12 +2752,13 @@ static inline bool skb_is_gso(const struct sk_buff *skb)
        return skb_shinfo(skb)->gso_size;
 }
 
+/* Note: Should be called only if skb_is_gso(skb) is true */
 static inline bool skb_is_gso_v6(const struct sk_buff *skb)
 {
        return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
 }
 
-extern void __skb_warn_lro_forwarding(const struct sk_buff *skb);
+void __skb_warn_lro_forwarding(const struct sk_buff *skb);
 
 static inline bool skb_warn_if_lro(const struct sk_buff *skb)
 {
index cfb7ca094b384522d2378c2a952bbe788812f1c6..731f5237d5f4e0f87dd817e15983f509d3b4d483 100644 (file)
@@ -155,6 +155,12 @@ smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
 
 static inline void kick_all_cpus_sync(void) {  }
 
+static inline void __smp_call_function_single(int cpuid,
+               struct call_single_data *data, int wait)
+{
+       on_each_cpu(data->func, data->info, wait);
+}
+
 #endif /* !SMP */
 
 /*
index 86a12b0cb239850d903e53b52091ad53fd179115..0688472500bbabb1274a55c1ab0720d39b10922b 100644 (file)
@@ -108,6 +108,16 @@ static inline int ssb_gige_get_macaddr(struct pci_dev *pdev, u8 *macaddr)
        return 0;
 }
 
+/* Get the device phy address */
+static inline int ssb_gige_get_phyaddr(struct pci_dev *pdev)
+{
+       struct ssb_gige *dev = pdev_to_ssb_gige(pdev);
+       if (!dev)
+               return -ENODEV;
+
+       return dev->dev->bus->sprom.et0phyaddr;
+}
+
 extern int ssb_gige_pcibios_plat_dev_init(struct ssb_device *sdev,
                                          struct pci_dev *pdev);
 extern int ssb_gige_map_irq(struct ssb_device *sdev,
@@ -174,6 +184,10 @@ static inline int ssb_gige_get_macaddr(struct pci_dev *pdev, u8 *macaddr)
 {
        return -ENODEV;
 }
+static inline int ssb_gige_get_phyaddr(struct pci_dev *pdev)
+{
+       return -ENODEV;
+}
 
 #endif /* CONFIG_SSB_DRIVER_GIGE */
 #endif /* LINUX_SSB_DRIVER_GIGE_H_ */
index 9cb2fe8ca944d5095043dd7b7ce71116a3fc21eb..e303eef94dd5cea92d16d7ae63aa35f5c3f1d4bb 100644 (file)
@@ -42,6 +42,7 @@ struct usbnet {
        struct usb_host_endpoint *status;
        unsigned                maxpacket;
        struct timer_list       delay;
+       const char              *padding_pkt;
 
        /* protocol/interface state */
        struct net_device       *net;
index fb314de2b61ba228059b1f09cb916d7d275a6a9b..86505bfa5d2c4829698d21ce81ba47575b504941 100644 (file)
@@ -67,6 +67,10 @@ int ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
 int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr);
 #endif
 
+bool ipv6_chk_custom_prefix(const struct in6_addr *addr,
+                                  const unsigned int prefix_len,
+                                  struct net_device *dev);
+
 int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev);
 
 struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net,
index aaeaf0938ec0af1181c4660c8fa0468937e1bc74..15f10841e2b5ddedb94dadfe166e2fe19b09cfee 100644 (file)
@@ -104,6 +104,7 @@ enum {
 enum {
        HCI_SETUP,
        HCI_AUTO_OFF,
+       HCI_RFKILLED,
        HCI_MGMT,
        HCI_PAIRABLE,
        HCI_SERVICE_CACHE,
index ac2439d02f54bbb7d070f39fff79a65ad7cf6c81..7e64bd8bbda941319b494bcb7b66e79b7f52370a 100644 (file)
@@ -14,4 +14,5 @@ struct flow_keys {
 };
 
 bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow);
+__be32 skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto);
 #endif
index 594dfeead70fe6aceaa503a9131808cf62ddd787..10d6838378c36697afd748232ea6ab499c608a1b 100644 (file)
@@ -302,35 +302,25 @@ static inline struct sock *inet_lookup_listener(struct net *net,
                                   ((__force __u64)(__be32)(__saddr)));
 #endif /* __BIG_ENDIAN */
 #define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif)    \
-       ((inet_sk(__sk)->inet_portpair == (__ports))            &&      \
-        (inet_sk(__sk)->inet_addrpair == (__cookie))           &&      \
+       (((__sk)->sk_portpair == (__ports))                     &&      \
+        ((__sk)->sk_addrpair == (__cookie))                    &&      \
         (!(__sk)->sk_bound_dev_if      ||                              \
           ((__sk)->sk_bound_dev_if == (__dif)))                &&      \
         net_eq(sock_net(__sk), (__net)))
-#define INET_TW_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif)\
-       ((inet_twsk(__sk)->tw_portpair == (__ports))    &&              \
-        (inet_twsk(__sk)->tw_addrpair == (__cookie))   &&              \
-        (!(__sk)->sk_bound_dev_if      ||                              \
-          ((__sk)->sk_bound_dev_if == (__dif)))        &&              \
-        net_eq(sock_net(__sk), (__net)))
 #else /* 32-bit arch */
 #define INET_ADDR_COOKIE(__name, __saddr, __daddr)
 #define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif) \
-       ((inet_sk(__sk)->inet_portpair == (__ports))    &&              \
-        (inet_sk(__sk)->inet_daddr     == (__saddr))   &&              \
-        (inet_sk(__sk)->inet_rcv_saddr == (__daddr))   &&              \
-        (!(__sk)->sk_bound_dev_if      ||                              \
-          ((__sk)->sk_bound_dev_if == (__dif)))        &&              \
-        net_eq(sock_net(__sk), (__net)))
-#define INET_TW_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif) \
-       ((inet_twsk(__sk)->tw_portpair == (__ports))    &&              \
-        (inet_twsk(__sk)->tw_daddr     == (__saddr))   &&              \
-        (inet_twsk(__sk)->tw_rcv_saddr == (__daddr))   &&              \
+       (((__sk)->sk_portpair == (__ports))             &&              \
+        ((__sk)->sk_daddr      == (__saddr))           &&              \
+        ((__sk)->sk_rcv_saddr  == (__daddr))           &&              \
         (!(__sk)->sk_bound_dev_if      ||                              \
           ((__sk)->sk_bound_dev_if == (__dif)))        &&              \
         net_eq(sock_net(__sk), (__net)))
 #endif /* 64-bit arch */
 
+#define INET_TW_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif)\
+       INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif)
+
 /*
  * Sockets in TCP_CLOSE state are _always_ taken out of the hash, so we need
  * not check it for lookups anymore, thanks Alexey. -DaveM
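
With the address and port pairs now living in struct sock_common (see the sock.h hunk further down, which adds the sk_addrpair/sk_portpair aliases), a single comparison macro can serve both established and TIME_WAIT sockets, which is why INET_TW_MATCH collapses into INET_MATCH above. A rough lookup fragment under that assumption, with hash, acookie, ports and dif taken to be the usual precomputed locals of an established-table lookup:

/* Sketch only: the same INET_MATCH test applies whether "sk" is a full
 * socket or a timewait socket, since both embed struct sock_common. */
sk_nulls_for_each_rcu(sk, node, &head->chain) {
	if (sk->sk_hash != hash)
		continue;
	if (INET_MATCH(sk, net, acookie, saddr, daddr, ports, dif))
		return sk;	/* caller still rechecks under refcount */
}
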
index 636d203727a2dcef09f2f8f0b2e214c317c7675a..6d9a7e6eb5a4a14ae3e11d5ade213b5e452c5a62 100644 (file)
@@ -103,6 +103,9 @@ struct inet_cork {
        int                     length; /* Total length of all frames */
        struct dst_entry        *dst;
        u8                      tx_flags;
+       __u8                    ttl;
+       __s16                   tos;
+       char                    priority;
 };
 
 struct inet_cork_full {
@@ -143,10 +146,8 @@ struct inet_sock {
        /* Socket demultiplex comparisons on incoming packets. */
 #define inet_daddr             sk.__sk_common.skc_daddr
 #define inet_rcv_saddr         sk.__sk_common.skc_rcv_saddr
-#define inet_addrpair          sk.__sk_common.skc_addrpair
 #define inet_dport             sk.__sk_common.skc_dport
 #define inet_num               sk.__sk_common.skc_num
-#define inet_portpair          sk.__sk_common.skc_portpair
 
        __be32                  inet_saddr;
        __s16                   uc_ttl;
index 828200ab11251722d1411e33ff27971055126af8..f528d1b0ac951bba9c247a28f1955d2693ee6138 100644 (file)
@@ -58,6 +58,11 @@ struct inet_hashinfo;
 # define INET_TWDR_RECYCLE_TICK (12 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG)
 #endif
 
+static inline u32 inet_tw_time_stamp(void)
+{
+       return jiffies;
+}
+
 /* TIME_WAIT reaping mechanism. */
 #define INET_TWDR_TWKILL_SLOTS 8 /* Please keep this a power of 2. */
 
@@ -112,10 +117,8 @@ struct inet_timewait_sock {
 #define tw_net                 __tw_common.skc_net
 #define tw_daddr               __tw_common.skc_daddr
 #define tw_rcv_saddr           __tw_common.skc_rcv_saddr
-#define tw_addrpair            __tw_common.skc_addrpair
 #define tw_dport               __tw_common.skc_dport
 #define tw_num                 __tw_common.skc_num
-#define tw_portpair            __tw_common.skc_portpair
 
        int                     tw_timeout;
        volatile unsigned char  tw_substate;
@@ -132,7 +135,7 @@ struct inet_timewait_sock {
                                tw_tos          : 8,
                                tw_ipv6_offset  : 16;
        kmemcheck_bitfield_end(flags);
-       unsigned long           tw_ttd;
+       u32                     tw_ttd;
        struct inet_bind_bucket *tw_tb;
        struct hlist_node       tw_death_node;
 };
@@ -189,12 +192,6 @@ static inline struct inet_timewait_sock *inet_twsk(const struct sock *sk)
        return (struct inet_timewait_sock *)sk;
 }
 
-static inline __be32 sk_rcv_saddr(const struct sock *sk)
-{
-/* both inet_sk() and inet_twsk() store rcv_saddr in skc_rcv_saddr */
-       return sk->__sk_common.skc_rcv_saddr;
-}
-
 void inet_twsk_put(struct inet_timewait_sock *tw);
 
 int inet_twsk_unhash(struct inet_timewait_sock *tw);
index c1f192b8cd0e98dbcf062faae9bf4c2daf534195..16078f422397841a5c532ee4afb0343563d6ef2c 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/skbuff.h>
 
 #include <net/inet_sock.h>
+#include <net/route.h>
 #include <net/snmp.h>
 #include <net/flow.h>
 
@@ -56,6 +57,9 @@ struct ipcm_cookie {
        int                     oif;
        struct ip_options_rcu   *opt;
        __u8                    tx_flags;
+       __u8                    ttl;
+       __s16                   tos;
+       char                    priority;
 };
 
 #define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb))
@@ -137,6 +141,16 @@ static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4)
        return __ip_make_skb(sk, fl4, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
 }
 
+static inline __u8 get_rttos(struct ipcm_cookie* ipc, struct inet_sock *inet)
+{
+       return (ipc->tos != -1) ? RT_TOS(ipc->tos) : RT_TOS(inet->tos);
+}
+
+static inline __u8 get_rtconn_flags(struct ipcm_cookie* ipc, struct sock* sk)
+{
+       return (ipc->tos != -1) ? RT_CONN_FLAGS_TOS(sk, ipc->tos) : RT_CONN_FLAGS(sk);
+}
+
 /* datagram.c */
 int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
 
@@ -203,11 +217,7 @@ static inline void snmp_mib_free(void __percpu *ptr[SNMP_ARRAY_SZ])
        }
 }
 
-extern struct local_ports {
-       seqlock_t       lock;
-       int             range[2];
-} sysctl_local_ports;
-void inet_get_local_port_range(int *low, int *high);
+void inet_get_local_port_range(struct net *net, int *low, int *high);
 
 extern unsigned long *sysctl_local_reserved_ports;
 static inline int inet_is_reserved_local_port(int port)
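
The get_rttos()/get_rtconn_flags() helpers added above choose between a per-packet TOS and the socket default: ipc->tos stays -1 unless the packet carried an IP_TOS ancillary message, in which case the cmsg value wins. A hedged caller sketch, with variable names assumed from a udp_sendmsg-style sender path:

/* Hypothetical sender fast path: fall back to the socket-wide
 * inet->tos / RT_CONN_FLAGS(sk) defaults when no per-packet TOS
 * was supplied (ipc.tos == -1). */
tos = get_rttos(&ipc, inet);		/* TOS used for the route lookup  */
flags = get_rtconn_flags(&ipc, sk);	/* RT_TOS bits | SOCK_LOCALROUTE  */
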
index eab88f0e2088ec2661024a8037770d32e493fe71..6738f3409a6f701e82f431a760750f749863a73c 100644 (file)
@@ -280,10 +280,6 @@ struct fib6_node *fib6_locate(struct fib6_node *root,
                              const struct in6_addr *daddr, int dst_len,
                              const struct in6_addr *saddr, int src_len);
 
-void fib6_clean_all_ro(struct net *net,
-                      int (*func)(struct rt6_info *, void *arg),
-                      int prune, void *arg);
-
 void fib6_clean_all(struct net *net, int (*func)(struct rt6_info *, void *arg),
                    int prune, void *arg);
 
@@ -299,6 +295,8 @@ void fib6_gc_cleanup(void);
 
 int fib6_init(void);
 
+int ipv6_route_open(struct inode *inode, struct file *file);
+
 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
 int fib6_rules_init(void);
 void fib6_rules_cleanup(void);
index b6fd378c09c854469777395262682fe32d7225da..1c2e1b9f6b8603aecdf14452a6b424b227cd504b 100644 (file)
@@ -723,8 +723,6 @@ struct ip_vs_dest_dst {
        struct rcu_head         rcu_head;
 };
 
-/* In grace period after removing */
-#define IP_VS_DEST_STATE_REMOVING      0x01
 /*
  *     The real server destination forwarding entry
  *     with ip address, port number, and so on.
@@ -742,7 +740,7 @@ struct ip_vs_dest {
 
        atomic_t                refcnt;         /* reference counter */
        struct ip_vs_stats      stats;          /* statistics */
-       unsigned long           state;          /* state flags */
+       unsigned long           idle_start;     /* start time, jiffies */
 
        /* connection counters and thresholds */
        atomic_t                activeconns;    /* active connections */
@@ -756,14 +754,13 @@ struct ip_vs_dest {
        struct ip_vs_dest_dst __rcu *dest_dst;  /* cached dst info */
 
        /* for virtual service */
-       struct ip_vs_service    *svc;           /* service it belongs to */
+       struct ip_vs_service __rcu *svc;        /* service it belongs to */
        __u16                   protocol;       /* which protocol (TCP/UDP) */
        __be16                  vport;          /* virtual port number */
        union nf_inet_addr      vaddr;          /* virtual IP address */
        __u32                   vfwmark;        /* firewall mark of service */
 
        struct list_head        t_list;         /* in dest_trash */
-       struct rcu_head         rcu_head;
        unsigned int            in_rs_table:1;  /* we are in rs_table */
 };
 
@@ -1638,7 +1635,7 @@ static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp)
 /* CONFIG_IP_VS_NFCT */
 #endif
 
-static inline unsigned int
+static inline int
 ip_vs_dest_conn_overhead(struct ip_vs_dest *dest)
 {
        /*
index 687c8feecd0967dc3658adf3c42a21db2b1fb685..31912c3be772bfa5971e3f31a11f982cdd22ad2d 100644 (file)
@@ -112,6 +112,7 @@ struct mrp_applicant {
        struct mrp_application  *app;
        struct net_device       *dev;
        struct timer_list       join_timer;
+       struct timer_list       periodic_timer;
 
        spinlock_t              lock;
        struct sk_buff_head     queue;
index 38c2afb585de16a0b759a330c8960b738c9605d5..bcc4a8ed44504105b10ad10db82bbaf04796f7e3 100644 (file)
@@ -74,6 +74,7 @@ struct net {
        struct hlist_head       *dev_index_head;
        unsigned int            dev_base_seq;   /* protected by rtnl_mutex */
        int                     ifindex;
+       unsigned int            dev_unreg_count;
 
        /* core fib_rules */
        struct list_head        rules_ops;
index 968186642bb163e79eedbe08d547b32c9292159d..6793614e6502a0eb51ca7efabc593e7bb6866576 100644 (file)
@@ -56,7 +56,7 @@ struct synproxy_options {
 
 struct tcphdr;
 struct xt_synproxy_info;
-void synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
+bool synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
                            const struct tcphdr *th,
                            struct synproxy_options *opts);
 unsigned int synproxy_options_size(const struct synproxy_options *opts);
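
synproxy_parse_options() now returns bool so that callers can detect a malformed TCP option block instead of acting on garbage. A short caller sketch, assuming the usual xt target context where par->thoff is the transport header offset and th points at the TCP header:

/* Drop packets whose options cannot be parsed rather than seeding
 * SYN-proxy state from them. */
if (!synproxy_parse_options(skb, par->thoff, th, &opts))
	return NF_DROP;
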
index bf2ec2202c5698b1bb73e22df60b4a3bc9d95c01..5dbd232e12ffce75dbcbb9cbfcac2ccdf5cf1cec 100644 (file)
@@ -15,6 +15,10 @@ struct fib_rules_ops;
 struct hlist_head;
 struct fib_table;
 struct sock;
+struct local_ports {
+       seqlock_t       lock;
+       int             range[2];
+};
 
 struct netns_ipv4 {
 #ifdef CONFIG_SYSCTL
@@ -62,6 +66,8 @@ struct netns_ipv4 {
        int sysctl_icmp_ratemask;
        int sysctl_icmp_errors_use_inbound_ifaddr;
 
+       struct local_ports sysctl_local_ports;
+
        int sysctl_tcp_ecn;
 
        kgid_t sysctl_ping_group_range[2];
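
This hunk, together with the ip.h change above that drops the global sysctl_local_ports and gives inet_get_local_port_range() a struct net argument, moves the ephemeral port range into struct netns_ipv4. A hedged caller sketch of the new per-namespace lookup:

/* Hypothetical caller: read the ephemeral range for this socket's
 * namespace and pick a port inside [low, high]. */
int port, low, high;

inet_get_local_port_range(sock_net(sk), &low, &high);
port = low + prandom_u32() % (high - low + 1);
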
index 6f572ca66d2521fc6b8d093fea27022a4ac9d147..0ad8e0102386943a8087a7bbded76ac082b63e96 100644 (file)
@@ -39,6 +39,7 @@
 #define RTO_ONLINK     0x01
 
 #define RT_CONN_FLAGS(sk)   (RT_TOS(inet_sk(sk)->tos) | sock_flag(sk, SOCK_LOCALROUTE))
+#define RT_CONN_FLAGS_TOS(sk,tos)   (RT_TOS(tos) | sock_flag(sk, SOCK_LOCALROUTE))
 
 struct fib_nh;
 struct fib_info;
index 52c1a906f28837c71719dd4a0f7eae1a1dee453a..f257486f17be4bed528826544359c569e6a4b2dd 100644 (file)
@@ -3,7 +3,6 @@
 
 #include <linux/types.h>
 
-void net_secret_init(void);
 __u32 secure_ip_id(__be32 daddr);
 __u32 secure_ipv6_id(const __be32 daddr[4]);
 u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
index 4625d2eff461b7c9206388db1776c93bdfabdfe7..e3bf213be6259ab00a1057cb58b44ad5e80515cb 100644 (file)
@@ -233,6 +233,7 @@ struct cg_proto;
   *    @sk_ll_usec: usecs to busypoll when there is no data
   *    @sk_allocation: allocation mode
   *    @sk_pacing_rate: Pacing rate (if supported by transport/packet scheduler)
+  *    @sk_max_pacing_rate: Maximum pacing rate (%SO_MAX_PACING_RATE)
   *    @sk_sndbuf: size of send buffer in bytes
   *    @sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
   *               %SO_OOBINLINE settings, %SO_TIMESTAMPING settings
@@ -299,6 +300,10 @@ struct sock {
 #define sk_dontcopy_begin      __sk_common.skc_dontcopy_begin
 #define sk_dontcopy_end                __sk_common.skc_dontcopy_end
 #define sk_hash                        __sk_common.skc_hash
+#define sk_portpair            __sk_common.skc_portpair
+#define sk_addrpair            __sk_common.skc_addrpair
+#define sk_daddr               __sk_common.skc_daddr
+#define sk_rcv_saddr           __sk_common.skc_rcv_saddr
 #define sk_family              __sk_common.skc_family
 #define sk_state               __sk_common.skc_state
 #define sk_reuse               __sk_common.skc_reuse
@@ -363,6 +368,7 @@ struct sock {
        int                     sk_wmem_queued;
        gfp_t                   sk_allocation;
        u32                     sk_pacing_rate; /* bytes per second */
+       u32                     sk_max_pacing_rate;
        netdev_features_t       sk_route_caps;
        netdev_features_t       sk_route_nocaps;
        int                     sk_gso_type;
@@ -409,6 +415,11 @@ struct sock {
        void                    (*sk_destruct)(struct sock *sk);
 };
 
+#define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data)))
+
+#define rcu_dereference_sk_user_data(sk)       rcu_dereference(__sk_user_data((sk)))
+#define rcu_assign_sk_user_data(sk, ptr)       rcu_assign_pointer(__sk_user_data((sk)), ptr)
+
 /*
  * SK_CAN_REUSE and SK_NO_REUSE on a socket mean that the socket is OK
  * or not whether his port will be reused by someone else. SK_FORCE_REUSE
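The two helpers above give tunnel-style users a sparse-clean way to publish and read sk_user_data under RCU. A hedged sketch of the intended usage; the tunnel structure and function names are invented for illustration.

struct demo_tunnel {
	u32 id;
	/* ... per-socket tunnel state ... */
};

static void demo_tunnel_attach(struct sock *sk, struct demo_tunnel *tun)
{
	rcu_assign_sk_user_data(sk, tun);	/* publish with the required barrier */
}

static int demo_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct demo_tunnel *tun;

	rcu_read_lock();
	tun = rcu_dereference_sk_user_data(sk);
	if (!tun) {
		rcu_read_unlock();
		kfree_skb(skb);
		return 0;
	}
	/* ... demultiplex skb based on tun->id ... */
	rcu_read_unlock();
	return 0;
}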
index 765746192724bc9e445f65df263821ef807a3640..b8a9ed849801afa0283ac3f1511a2f56415ee8ca 100644 (file)
@@ -1357,6 +1357,12 @@ struct xfrm_tunnel {
        int priority;
 };
 
+struct xfrm_tunnel_notifier {
+       int (*handler)(struct sk_buff *skb);
+       struct xfrm_tunnel_notifier __rcu *next;
+       int priority;
+};
+
 struct xfrm6_tunnel {
        int (*handler)(struct sk_buff *skb);
        int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
@@ -1499,9 +1505,9 @@ int xfrm4_output(struct sk_buff *skb);
 int xfrm4_output_finish(struct sk_buff *skb);
 int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family);
 int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family);
-int xfrm4_mode_tunnel_input_register(struct xfrm_tunnel *handler);
-int xfrm4_mode_tunnel_input_deregister(struct xfrm_tunnel *handler);
 void xfrm4_local_error(struct sk_buff *skb, u32 mtu);
+int xfrm4_mode_tunnel_input_register(struct xfrm_tunnel_notifier *handler);
+int xfrm4_mode_tunnel_input_deregister(struct xfrm_tunnel_notifier *handler);
 int xfrm6_extract_header(struct sk_buff *skb);
 int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb);
 int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi);
@@ -1509,9 +1515,9 @@ int xfrm6_transport_finish(struct sk_buff *skb, int async);
 int xfrm6_rcv(struct sk_buff *skb);
 int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
                     xfrm_address_t *saddr, u8 proto);
+void xfrm6_local_error(struct sk_buff *skb, u32 mtu);
 int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family);
-int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler,
-                           unsigned short family);
+int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family);
 __be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr);
 __be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr);
 int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb);
@@ -1520,7 +1526,6 @@ int xfrm6_output(struct sk_buff *skb);
 int xfrm6_output_finish(struct sk_buff *skb);
 int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
                          u8 **prevhdr);
-void xfrm6_local_error(struct sk_buff *skb, u32 mtu);
 
 #ifdef CONFIG_XFRM
 int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
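Since the input hooks now take the dedicated notifier type, a user registers roughly as below; the handler and variable names are illustrative only.

static int demo_tunnel_input(struct sk_buff *skb)
{
	/* observe the decapsulated packet; never steals the skb */
	return 0;
}

static struct xfrm_tunnel_notifier demo_notifier __read_mostly = {
	.handler  = demo_tunnel_input,
	.priority = 1,
};

/* in the module init/exit paths:
 *	xfrm4_mode_tunnel_input_register(&demo_notifier);
 *	xfrm4_mode_tunnel_input_deregister(&demo_notifier);
 */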
index 60ae7c3db912de7e068452de1a1c1978cad0a662..4c2301d2ef1aa979ea0d6594ad1b6404368b920b 100644 (file)
@@ -618,6 +618,7 @@ TRACE_EVENT(block_rq_remap,
                __field( unsigned int,  nr_sector       )
                __field( dev_t,         old_dev         )
                __field( sector_t,      old_sector      )
+               __field( unsigned int,  nr_bios         )
                __array( char,          rwbs,   RWBS_LEN)
        ),
 
@@ -627,15 +628,16 @@ TRACE_EVENT(block_rq_remap,
                __entry->nr_sector      = blk_rq_sectors(rq);
                __entry->old_dev        = dev;
                __entry->old_sector     = from;
+               __entry->nr_bios        = blk_rq_count_bios(rq);
                blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
        ),
 
-       TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
+       TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu %u",
                  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
                  (unsigned long long)__entry->sector,
                  __entry->nr_sector,
                  MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
-                 (unsigned long long)__entry->old_sector)
+                 (unsigned long long)__entry->old_sector, __entry->nr_bios)
 );
 
 #endif /* _TRACE_BLOCK_H */
index 45702c3c3837f316709438a462d4a6517e0ba168..f18b3b76e01e22e00c00ee133b2ebdc1013bcc62 100644 (file)
@@ -42,6 +42,7 @@ struct extent_buffer;
                { BTRFS_TREE_LOG_OBJECTID,      "TREE_LOG"      },      \
                { BTRFS_QUOTA_TREE_OBJECTID,    "QUOTA_TREE"    },      \
                { BTRFS_TREE_RELOC_OBJECTID,    "TREE_RELOC"    },      \
+               { BTRFS_UUID_TREE_OBJECTID,     "UUID_RELOC"    },      \
                { BTRFS_DATA_RELOC_TREE_OBJECTID, "DATA_RELOC_TREE" })
 
 #define show_root_type(obj)                                            \
index f04b69b6abf251d6f0ba720db25822b11b953489..38f14d0264c3aa78f3b62b93cba405c655e23ae6 100644 (file)
@@ -78,4 +78,6 @@
 
 #define SO_BUSY_POLL           46
 
+#define SO_MAX_PACING_RATE     47
+
 #endif /* __ASM_GENERIC_SOCKET_H */
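From userspace the new option is a plain per-socket setsockopt taking the cap in bytes per second (the kernel clamps sk_pacing_rate to it). A minimal, self-contained sketch; the 1 MB/s figure is arbitrary.

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>

#ifndef SO_MAX_PACING_RATE
#define SO_MAX_PACING_RATE 47		/* matches the value added above */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	unsigned int max_rate = 1000000;	/* bytes per second */

	if (fd < 0)
		return 1;
	if (setsockopt(fd, SOL_SOCKET, SO_MAX_PACING_RATE,
		       &max_rate, sizeof(max_rate)) < 0)
		perror("setsockopt(SO_MAX_PACING_RATE)");
	return 0;
}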
index fa8b3adf9ffbbc478a7aae0f83ffbe3bd584a91b..46d41e8b0dccec30ec5b52f6dc772bf9e3088dc6 100644 (file)
@@ -1007,4 +1007,6 @@ struct drm_radeon_info {
 #define SI_TILE_MODE_DEPTH_STENCIL_2D_4AA      3
 #define SI_TILE_MODE_DEPTH_STENCIL_2D_8AA      2
 
+#define CIK_TILE_MODE_DEPTH_STENCIL_1D         5
+
 #endif
index 3ebe387fea4d809790a26bbfc9aa3cea02f20cfb..382251a1d21403acd817577d83c21f47d0389865 100644 (file)
@@ -7,6 +7,38 @@
  * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
  * All rights reserved.
  *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Volkswagen nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * The provided data structures and external interfaces from this code
+ * are not restricted to be used by modules with a GPL compatible license.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
  */
 
 #ifndef CAN_BCM_H
index 7b7148bded711b1346a2274a09af5b505e7af72e..b632045453202074ada263866052bc2a806e85bc 100644 (file)
@@ -7,6 +7,38 @@
  * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
  * All rights reserved.
  *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Volkswagen nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * The provided data structures and external interfaces from this code
+ * are not restricted to be used by modules with a GPL compatible license.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
  */
 
 #ifndef CAN_ERROR_H
index 4e27c82b564a13a6a4b55860c3cb78b51f86dc7e..844c8964bdfee3a3f4a7308bf0fd832e82754a89 100644 (file)
@@ -7,6 +7,38 @@
  * Copyright (c) 2011 Volkswagen Group Electronic Research
  * All rights reserved.
  *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Volkswagen nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * The provided data structures and external interfaces from this code
+ * are not restricted to be used by modules with a GPL compatible license.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
  */
 
 #ifndef CAN_GW_H
index 14966ddb7df1c5b056578ff622c77cd6400e8094..df944ed206a8e4bd23d9f9a3ec9e08c9e0ad6e3b 100644 (file)
@@ -5,6 +5,14 @@
  *
  * Copyright (c) 2009 Wolfgang Grandegger <wg@grandegger.com>
  *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
  */
 
 #ifndef CAN_NETLINK_H
index a814062b07191819c80812b843a46bd80870344b..c7d8c334e0ce26838c7cc611bd3ad1eb5a31a6c4 100644 (file)
@@ -8,6 +8,38 @@
  * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
  * All rights reserved.
  *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Volkswagen nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * The provided data structures and external interfaces from this code
+ * are not restricted to be used by modules with a GPL compatible license.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
  */
 
 #ifndef CAN_RAW_H
index a17edda8a7816c2ba92e7c1b7aee3cce23b93f8f..9635a62f6f89c781a9086af47df6382c59fea161 100644 (file)
@@ -91,6 +91,8 @@
 #define BOND_XMIT_POLICY_LAYER2                0 /* layer 2 (MAC only), default */
 #define BOND_XMIT_POLICY_LAYER34       1 /* layer 3+4 (IP ^ (TCP || UDP)) */
 #define BOND_XMIT_POLICY_LAYER23       2 /* layer 2+3 (IP ^ MAC) */
+#define BOND_XMIT_POLICY_ENCAP23       3 /* encapsulated layer 2+3 */
+#define BOND_XMIT_POLICY_ENCAP34       4 /* encapsulated layer 3+4 */
 
 typedef struct ifbond {
        __s32 bond_mode;
index 40a1fb8073961425249d50110a6de04f856feac9..009a655a5d354c51e20fb34cb12ca653e94f9b71 100644 (file)
@@ -380,10 +380,13 @@ struct perf_event_mmap_page {
        union {
                __u64   capabilities;
                struct {
-                       __u64   cap_usr_time            : 1,
-                               cap_usr_rdpmc           : 1,
-                               cap_usr_time_zero       : 1,
-                               cap_____res             : 61;
+                       __u64   cap_bit0                : 1, /* Always 0, deprecated, see commit 860f085b74e9 */
+                               cap_bit0_is_deprecated  : 1, /* Always 1, signals that bit 0 is zero */
+
+                               cap_user_rdpmc          : 1, /* The RDPMC instruction can be used to read counts */
+                               cap_user_time           : 1, /* The time_* fields are used */
+                               cap_user_time_zero      : 1, /* The time_zero field is used */
+                               cap_____res             : 59;
                };
        };
 
@@ -442,12 +445,13 @@ struct perf_event_mmap_page {
         *               ((rem * time_mult) >> time_shift);
         */
        __u64   time_zero;
+       __u32   size;                   /* Header size up to __reserved[] fields. */
 
                /*
                 * Hole for extension of the self monitor capabilities
                 */
 
-       __u64   __reserved[119];        /* align to 1k */
+       __u8    __reserved[118*8+4];    /* align to 1k. */
 
        /*
         * Control data for the mmap() data buffer.
@@ -528,6 +532,7 @@ enum perf_event_type {
         *      u64                             len;
         *      u64                             pgoff;
         *      char                            filename[];
+        *      struct sample_id                sample_id;
         * };
         */
        PERF_RECORD_MMAP                        = 1,
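Userspace that consumes the mmap'ed control page can distinguish the old and new capability layouts via the always-set cap_bit0_is_deprecated bit. A hedged sketch; the helper name is made up, and pc is assumed to point at the mapped page.

#include <linux/perf_event.h>
#include <stdbool.h>

static bool userpage_has_user_time(volatile struct perf_event_mmap_page *pc)
{
	if (pc->cap_bit0_is_deprecated)
		return pc->cap_user_time;	/* new layout: dedicated bits */
	/*
	 * Old layout: bit 0 was cap_usr_time, so fall back to testing the
	 * raw capabilities word.
	 */
	return pc->capabilities & 1;
}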
index b0d541d426771caec217961b031e3de191135ced..558aa91186b6ced1a27b1e05b65c5ee129e0a175 100644 (file)
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -165,6 +165,15 @@ static inline void msg_rmid(struct ipc_namespace *ns, struct msg_queue *s)
        ipc_rmid(&msg_ids(ns), &s->q_perm);
 }
 
+static void msg_rcu_free(struct rcu_head *head)
+{
+       struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
+       struct msg_queue *msq = ipc_rcu_to_struct(p);
+
+       security_msg_queue_free(msq);
+       ipc_rcu_free(head);
+}
+
 /**
  * newque - Create a new msg queue
  * @ns: namespace
@@ -189,15 +198,14 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
        msq->q_perm.security = NULL;
        retval = security_msg_queue_alloc(msq);
        if (retval) {
-               ipc_rcu_putref(msq);
+               ipc_rcu_putref(msq, ipc_rcu_free);
                return retval;
        }
 
        /* ipc_addid() locks msq upon success. */
        id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
        if (id < 0) {
-               security_msg_queue_free(msq);
-               ipc_rcu_putref(msq);
+               ipc_rcu_putref(msq, msg_rcu_free);
                return id;
        }
 
@@ -276,8 +284,7 @@ static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
                free_msg(msg);
        }
        atomic_sub(msq->q_cbytes, &ns->msg_bytes);
-       security_msg_queue_free(msq);
-       ipc_rcu_putref(msq);
+       ipc_rcu_putref(msq, msg_rcu_free);
 }
 
 /*
@@ -688,6 +695,12 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
                if (ipcperms(ns, &msq->q_perm, S_IWUGO))
                        goto out_unlock0;
 
+               /* raced with RMID? */
+               if (msq->q_perm.deleted) {
+                       err = -EIDRM;
+                       goto out_unlock0;
+               }
+
                err = security_msg_queue_msgsnd(msq, msg, msgflg);
                if (err)
                        goto out_unlock0;
@@ -717,7 +730,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
                rcu_read_lock();
                ipc_lock_object(&msq->q_perm);
 
-               ipc_rcu_putref(msq);
+               ipc_rcu_putref(msq, ipc_rcu_free);
                if (msq->q_perm.deleted) {
                        err = -EIDRM;
                        goto out_unlock0;
@@ -894,6 +907,13 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgfl
                        goto out_unlock1;
 
                ipc_lock_object(&msq->q_perm);
+
+               /* raced with RMID? */
+               if (msq->q_perm.deleted) {
+                       msg = ERR_PTR(-EIDRM);
+                       goto out_unlock0;
+               }
+
                msg = find_msg(msq, &msgtyp, mode);
                if (!IS_ERR(msg)) {
                        /*
index 69b6a21f38441aa437a8f79cb53ae4c4a6a520a1..8c4f59b0204a2821811a4b28fd48e0ce6163d987 100644 (file)
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -243,71 +243,122 @@ static void merge_queues(struct sem_array *sma)
        }
 }
 
+static void sem_rcu_free(struct rcu_head *head)
+{
+       struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
+       struct sem_array *sma = ipc_rcu_to_struct(p);
+
+       security_sem_free(sma);
+       ipc_rcu_free(head);
+}
+
+/*
+ * Wait until all currently ongoing simple ops have completed.
+ * Caller must own sem_perm.lock.
+ * New simple ops cannot start, because simple ops first check
+ * that a) sem_perm.lock is free and b) complex_count is 0.
+ */
+static void sem_wait_array(struct sem_array *sma)
+{
+       int i;
+       struct sem *sem;
+
+       if (sma->complex_count)  {
+               /* The thread that increased sma->complex_count waited on
+                * all sem->lock locks. Thus we don't need to wait again.
+                */
+               return;
+       }
+
+       for (i = 0; i < sma->sem_nsems; i++) {
+               sem = sma->sem_base + i;
+               spin_unlock_wait(&sem->lock);
+       }
+}
+
 /*
  * If the request contains only one semaphore operation, and there are
  * no complex transactions pending, lock only the semaphore involved.
  * Otherwise, lock the entire semaphore array, since we either have
  * multiple semaphores in our own semops, or we need to look at
  * semaphores from other pending complex operations.
- *
- * Carefully guard against sma->complex_count changing between zero
- * and non-zero while we are spinning for the lock. The value of
- * sma->complex_count cannot change while we are holding the lock,
- * so sem_unlock should be fine.
- *
- * The global lock path checks that all the local locks have been released,
- * checking each local lock once. This means that the local lock paths
- * cannot start their critical sections while the global lock is held.
  */
 static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
                              int nsops)
 {
-       int locknum;
- again:
-       if (nsops == 1 && !sma->complex_count) {
-               struct sem *sem = sma->sem_base + sops->sem_num;
+       struct sem *sem;
 
-               /* Lock just the semaphore we are interested in. */
-               spin_lock(&sem->lock);
+       if (nsops != 1) {
+               /* Complex operation - acquire a full lock */
+               ipc_lock_object(&sma->sem_perm);
 
-               /*
-                * If sma->complex_count was set while we were spinning,
-                * we may need to look at things we did not lock here.
+               /* And wait until all simple ops that are processed
+                * right now have dropped their locks.
                 */
-               if (unlikely(sma->complex_count)) {
-                       spin_unlock(&sem->lock);
-                       goto lock_array;
-               }
+               sem_wait_array(sma);
+               return -1;
+       }
+
+       /*
+        * Only one semaphore affected - try to optimize locking.
+        * The rules are:
+        * - optimized locking is possible if no complex operation
+        *   is either enqueued or processed right now.
+        * - The test for enqueued complex ops is simple:
+        *      sma->complex_count != 0
+        * - Testing for complex ops that are processed right now is
+        *   a bit more difficult. Complex ops acquire the full lock
+        *   and first wait until the running simple ops have completed.
+        *   (see above)
+        *   Thus: If we own a simple lock and the global lock is free
+        *      and complex_count is now 0, then it will stay 0 and
+        *      thus just locking sem->lock is sufficient.
+        */
+       sem = sma->sem_base + sops->sem_num;
 
+       if (sma->complex_count == 0) {
                /*
-                * Another process is holding the global lock on the
-                * sem_array; we cannot enter our critical section,
-                * but have to wait for the global lock to be released.
+                * It appears that no complex operation is around.
+                * Acquire the per-semaphore lock.
                 */
-               if (unlikely(spin_is_locked(&sma->sem_perm.lock))) {
-                       spin_unlock(&sem->lock);
-                       spin_unlock_wait(&sma->sem_perm.lock);
-                       goto again;
+               spin_lock(&sem->lock);
+
+               /* Then check that the global lock is free */
+               if (!spin_is_locked(&sma->sem_perm.lock)) {
+                       /* spin_is_locked() is not a memory barrier */
+                       smp_mb();
+
+                       /* Now repeat the test of complex_count:
+                        * It can't change anymore until we drop sem->lock.
+                        * Thus: if it is now 0, then it will stay 0.
+                        */
+                       if (sma->complex_count == 0) {
+                               /* fast path successful! */
+                               return sops->sem_num;
+                       }
                }
+               spin_unlock(&sem->lock);
+       }
 
-               locknum = sops->sem_num;
+       /* slow path: acquire the full lock */
+       ipc_lock_object(&sma->sem_perm);
+
+       if (sma->complex_count == 0) {
+               /* False alarm:
+                * There is no complex operation, thus we can switch
+                * back to the fast path.
+                */
+               spin_lock(&sem->lock);
+               ipc_unlock_object(&sma->sem_perm);
+               return sops->sem_num;
        } else {
-               int i;
-               /*
-                * Lock the semaphore array, and wait for all of the
-                * individual semaphore locks to go away.  The code
-                * above ensures no new single-lock holders will enter
-                * their critical section while the array lock is held.
+               /* Not a false alarm, thus complete the sequence for a
+                * full lock.
                 */
- lock_array:
-               ipc_lock_object(&sma->sem_perm);
-               for (i = 0; i < sma->sem_nsems; i++) {
-                       struct sem *sem = sma->sem_base + i;
-                       spin_unlock_wait(&sem->lock);
-               }
-               locknum = -1;
+               sem_wait_array(sma);
+               return -1;
        }
-       return locknum;
 }
 
 static inline void sem_unlock(struct sem_array *sma, int locknum)
@@ -374,12 +425,7 @@ static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns
 static inline void sem_lock_and_putref(struct sem_array *sma)
 {
        sem_lock(sma, NULL, -1);
-       ipc_rcu_putref(sma);
-}
-
-static inline void sem_putref(struct sem_array *sma)
-{
-       ipc_rcu_putref(sma);
+       ipc_rcu_putref(sma, ipc_rcu_free);
 }
 
 static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
@@ -458,14 +504,13 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
        sma->sem_perm.security = NULL;
        retval = security_sem_alloc(sma);
        if (retval) {
-               ipc_rcu_putref(sma);
+               ipc_rcu_putref(sma, ipc_rcu_free);
                return retval;
        }
 
        id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
        if (id < 0) {
-               security_sem_free(sma);
-               ipc_rcu_putref(sma);
+               ipc_rcu_putref(sma, sem_rcu_free);
                return id;
        }
        ns->used_sems += nsems;
@@ -872,6 +917,24 @@ again:
        return semop_completed;
 }
 
+/**
+ * set_semotime(sma, sops) - set sem_otime
+ * @sma: semaphore array
+ * @sops: operations that modified the array, may be NULL
+ *
+ * sem_otime is replicated to avoid cache line thrashing.
+ * This function sets one instance to the current time.
+ */
+static void set_semotime(struct sem_array *sma, struct sembuf *sops)
+{
+       if (sops == NULL) {
+               sma->sem_base[0].sem_otime = get_seconds();
+       } else {
+               sma->sem_base[sops[0].sem_num].sem_otime =
+                                                       get_seconds();
+       }
+}
+
 /**
  * do_smart_update(sma, sops, nsops, otime, pt) - optimized update_queue
  * @sma: semaphore array
@@ -922,17 +985,10 @@ static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsop
                        }
                }
        }
-       if (otime) {
-               if (sops == NULL) {
-                       sma->sem_base[0].sem_otime = get_seconds();
-               } else {
-                       sma->sem_base[sops[0].sem_num].sem_otime =
-                                                               get_seconds();
-               }
-       }
+       if (otime)
+               set_semotime(sma, sops);
 }
 
-
 /* The following counts are associated to each semaphore:
  *   semncnt        number of tasks waiting on semval being nonzero
  *   semzcnt        number of tasks waiting on semval being zero
@@ -1047,8 +1103,7 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
 
        wake_up_sem_queue_do(&tasks);
        ns->used_sems -= sma->sem_nsems;
-       security_sem_free(sma);
-       ipc_rcu_putref(sma);
+       ipc_rcu_putref(sma, sem_rcu_free);
 }
 
 static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
@@ -1292,7 +1347,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
                        rcu_read_unlock();
                        sem_io = ipc_alloc(sizeof(ushort)*nsems);
                        if(sem_io == NULL) {
-                               sem_putref(sma);
+                               ipc_rcu_putref(sma, ipc_rcu_free);
                                return -ENOMEM;
                        }
 
@@ -1328,20 +1383,20 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
                if(nsems > SEMMSL_FAST) {
                        sem_io = ipc_alloc(sizeof(ushort)*nsems);
                        if(sem_io == NULL) {
-                               sem_putref(sma);
+                               ipc_rcu_putref(sma, ipc_rcu_free);
                                return -ENOMEM;
                        }
                }
 
                if (copy_from_user (sem_io, p, nsems*sizeof(ushort))) {
-                       sem_putref(sma);
+                       ipc_rcu_putref(sma, ipc_rcu_free);
                        err = -EFAULT;
                        goto out_free;
                }
 
                for (i = 0; i < nsems; i++) {
                        if (sem_io[i] > SEMVMX) {
-                               sem_putref(sma);
+                               ipc_rcu_putref(sma, ipc_rcu_free);
                                err = -ERANGE;
                                goto out_free;
                        }
@@ -1629,7 +1684,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
        /* step 2: allocate new undo structure */
        new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
        if (!new) {
-               sem_putref(sma);
+               ipc_rcu_putref(sma, ipc_rcu_free);
                return ERR_PTR(-ENOMEM);
        }
 
@@ -1795,12 +1850,17 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
 
        error = perform_atomic_semop(sma, sops, nsops, un,
                                        task_tgid_vnr(current));
-       if (error <= 0) {
-               if (alter && error == 0)
+       if (error == 0) {
+               /* If the operation was successful, then do
+                * the required updates.
+                */
+               if (alter)
                        do_smart_update(sma, sops, nsops, 1, &tasks);
-
-               goto out_unlock_free;
+               else
+                       set_semotime(sma, sops);
        }
+       if (error <= 0)
+               goto out_unlock_free;
 
        /* We need to sleep on this operation, so we put the current
         * task into the pending queue and go to sleep.
@@ -2059,6 +2119,14 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
        struct sem_array *sma = it;
        time_t sem_otime;
 
+       /*
+        * The proc interface isn't aware of sem_lock(), it calls
+        * ipc_lock_object() directly (in sysvipc_find_ipc).
+        * In order to stay compatible with sem_lock(), we must wait until
+        * all simple semop() calls have left their critical regions.
+        */
+       sem_wait_array(sma);
+
        sem_otime = get_semotime(sma);
 
        return seq_printf(s,
index 2821cdf93adb39ac83f8a604e6f487590bfe01f7..d69739610fd4384323004c46782116d54db6bbd5 100644 (file)
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -167,6 +167,15 @@ static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
        ipc_lock_object(&ipcp->shm_perm);
 }
 
+static void shm_rcu_free(struct rcu_head *head)
+{
+       struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
+       struct shmid_kernel *shp = ipc_rcu_to_struct(p);
+
+       security_shm_free(shp);
+       ipc_rcu_free(head);
+}
+
 static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
 {
        ipc_rmid(&shm_ids(ns), &s->shm_perm);
@@ -208,8 +217,7 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
                user_shm_unlock(file_inode(shp->shm_file)->i_size,
                                                shp->mlock_user);
        fput (shp->shm_file);
-       security_shm_free(shp);
-       ipc_rcu_putref(shp);
+       ipc_rcu_putref(shp, shm_rcu_free);
 }
 
 /*
@@ -497,7 +505,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
        shp->shm_perm.security = NULL;
        error = security_shm_alloc(shp);
        if (error) {
-               ipc_rcu_putref(shp);
+               ipc_rcu_putref(shp, ipc_rcu_free);
                return error;
        }
 
@@ -566,8 +574,7 @@ no_id:
                user_shm_unlock(size, shp->mlock_user);
        fput(file);
 no_file:
-       security_shm_free(shp);
-       ipc_rcu_putref(shp);
+       ipc_rcu_putref(shp, shm_rcu_free);
        return error;
 }
 
index e829da9ed01f3dbe2973fd232a4f2b889f401c9c..fdb8ae7407755f9b1c3725e2a7fd38a8a31c7f54 100644 (file)
@@ -474,11 +474,6 @@ void ipc_free(void* ptr, int size)
                kfree(ptr);
 }
 
-struct ipc_rcu {
-       struct rcu_head rcu;
-       atomic_t refcount;
-} ____cacheline_aligned_in_smp;
-
 /**
  *     ipc_rcu_alloc   -       allocate ipc and rcu space 
  *     @size: size desired
@@ -505,27 +500,24 @@ int ipc_rcu_getref(void *ptr)
        return atomic_inc_not_zero(&p->refcount);
 }
 
-/**
- * ipc_schedule_free - free ipc + rcu space
- * @head: RCU callback structure for queued work
- */
-static void ipc_schedule_free(struct rcu_head *head)
-{
-       vfree(container_of(head, struct ipc_rcu, rcu));
-}
-
-void ipc_rcu_putref(void *ptr)
+void ipc_rcu_putref(void *ptr, void (*func)(struct rcu_head *head))
 {
        struct ipc_rcu *p = ((struct ipc_rcu *)ptr) - 1;
 
        if (!atomic_dec_and_test(&p->refcount))
                return;
 
-       if (is_vmalloc_addr(ptr)) {
-               call_rcu(&p->rcu, ipc_schedule_free);
-       } else {
-               kfree_rcu(p, rcu);
-       }
+       call_rcu(&p->rcu, func);
+}
+
+void ipc_rcu_free(struct rcu_head *head)
+{
+       struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
+
+       if (is_vmalloc_addr(p))
+               vfree(p);
+       else
+               kfree(p);
 }
 
 /**
index c5f3338ba1fa7913967c1bc7e9845e0561eeb027..f2f5036f2eeda9794bc545ac8045db3c56f7d425 100644 (file)
@@ -47,6 +47,13 @@ static inline void msg_exit_ns(struct ipc_namespace *ns) { }
 static inline void shm_exit_ns(struct ipc_namespace *ns) { }
 #endif
 
+struct ipc_rcu {
+       struct rcu_head rcu;
+       atomic_t refcount;
+} ____cacheline_aligned_in_smp;
+
+#define ipc_rcu_to_struct(p)  ((void *)(p+1))
+
 /*
  * Structure that holds the parameters needed by the ipc operations
  * (see after)
@@ -120,7 +127,8 @@ void ipc_free(void* ptr, int size);
  */
 void* ipc_rcu_alloc(int size);
 int ipc_rcu_getref(void *ptr);
-void ipc_rcu_putref(void *ptr);
+void ipc_rcu_putref(void *ptr, void (*func)(struct rcu_head *head));
+void ipc_rcu_free(struct rcu_head *head);
 
 struct kern_ipc_perm *ipc_lock(struct ipc_ids *, int);
 struct kern_ipc_perm *ipc_obtain_object(struct ipc_ids *ids, int id);
index 91e53d04b6a9e8841e697dcb290f1206468da21a..7b0e23a740ce345987c33f9e012302c24de0f4db 100644 (file)
@@ -1117,9 +1117,10 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
 
                        sleep_time = timeout_start + audit_backlog_wait_time -
                                        jiffies;
-                       if ((long)sleep_time > 0)
+                       if ((long)sleep_time > 0) {
                                wait_for_auditd(sleep_time);
-                       continue;
+                               continue;
+                       }
                }
                if (audit_rate_check() && printk_ratelimit())
                        printk(KERN_WARNING
index 247091bf0587a479594776be800db9164a10ae8b..859c8dfd78a1a5b296dd5c6c23d741e7a4c1e8cd 100644 (file)
@@ -50,6 +50,15 @@ void context_tracking_user_enter(void)
 {
        unsigned long flags;
 
+       /*
+        * Repeat the user_enter() check here because some archs may be calling
+        * this from asm and if no CPU needs context tracking, they shouldn't
+        * go further. Repeat the check here until they support the static key
+        * check.
+        */
+       if (!static_key_false(&context_tracking_enabled))
+               return;
+
        /*
         * Some contexts may involve an exception occurring in an irq,
         * leading to that nesting:
@@ -151,6 +160,9 @@ void context_tracking_user_exit(void)
 {
        unsigned long flags;
 
+       if (!static_key_false(&context_tracking_enabled))
+               return;
+
        if (in_interrupt())
                return;
 
index dd236b66ca3a8ef894386e7dcb8c7f02dbba34ae..cb4238e85b38e37886b27dd76351d2590a835924 100644 (file)
@@ -3660,6 +3660,26 @@ static void calc_timer_values(struct perf_event *event,
        *running = ctx_time - event->tstamp_running;
 }
 
+static void perf_event_init_userpage(struct perf_event *event)
+{
+       struct perf_event_mmap_page *userpg;
+       struct ring_buffer *rb;
+
+       rcu_read_lock();
+       rb = rcu_dereference(event->rb);
+       if (!rb)
+               goto unlock;
+
+       userpg = rb->user_page;
+
+       /* Allow new userspace to detect that bit 0 is deprecated */
+       userpg->cap_bit0_is_deprecated = 1;
+       userpg->size = offsetof(struct perf_event_mmap_page, __reserved);
+
+unlock:
+       rcu_read_unlock();
+}
+
 void __weak arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
 {
 }
@@ -4044,6 +4064,7 @@ again:
        ring_buffer_attach(event, rb);
        rcu_assign_pointer(event->rb, rb);
 
+       perf_event_init_userpage(event);
        perf_event_update_userpage(event);
 
 unlock:
index fb326365b69466158b03b726e53c4fe15448fa89..b086006c59e7c6957a51984a3ec101ea2db8525d 100644 (file)
@@ -571,6 +571,10 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
        DECLARE_COMPLETION_ONSTACK(done);
        int retval = 0;
 
+       if (!sub_info->path) {
+               call_usermodehelper_freeinfo(sub_info);
+               return -EINVAL;
+       }
        helper_lock();
        if (!khelper_wq || usermodehelper_disabled) {
                retval = -EBUSY;
index 81c4e78c8f4cc0b79b89086c3255934079082380..c00d5b502aa487de0f09117c21dfa12eb8923e0c 100644 (file)
@@ -254,11 +254,11 @@ int parse_args(const char *doing,
 
 
 STANDARD_PARAM_DEF(byte, unsigned char, "%hhu", unsigned long, kstrtoul);
-STANDARD_PARAM_DEF(short, short, "%hi", long, kstrtoul);
+STANDARD_PARAM_DEF(short, short, "%hi", long, kstrtol);
 STANDARD_PARAM_DEF(ushort, unsigned short, "%hu", unsigned long, kstrtoul);
-STANDARD_PARAM_DEF(int, int, "%i", long, kstrtoul);
+STANDARD_PARAM_DEF(int, int, "%i", long, kstrtol);
 STANDARD_PARAM_DEF(uint, unsigned int, "%u", unsigned long, kstrtoul);
-STANDARD_PARAM_DEF(long, long, "%li", long, kstrtoul);
+STANDARD_PARAM_DEF(long, long, "%li", long, kstrtol);
 STANDARD_PARAM_DEF(ulong, unsigned long, "%lu", unsigned long, kstrtoul);
 
 int param_set_charp(const char *val, const struct kernel_param *kp)
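The practical effect of switching the signed definitions to kstrtol is that negative module parameter values parse correctly again. A tiny illustrative module (names invented) that benefits from the fix:

#include <linux/module.h>

/* e.g. "insmod demo.ko demo_level=-5" or "<module>.demo_level=-5" on the
 * kernel command line now works for this signed parameter. */
static int demo_level = -1;
module_param(demo_level, int, 0644);
MODULE_PARM_DESC(demo_level, "signed level; negative values are accepted");

MODULE_LICENSE("GPL");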
index ebe5e80b10f8495a3d6459e0203821f987237df0..9b9a26698144e126486e162955e49561514b1ce8 100644 (file)
@@ -273,6 +273,11 @@ void free_pid(struct pid *pid)
                         */
                        wake_up_process(ns->child_reaper);
                        break;
+               case PIDNS_HASH_ADDING:
+                       /* Handle a fork failure of the first process */
+                       WARN_ON(ns->child_reaper);
+                       ns->nr_hashed = 0;
+                       /* fall through */
                case 0:
                        schedule_work(&ns->proc_work);
                        break;
index 269ed9384cc4284e9cf6043bd00667ea582c4369..f813b3474646c5b320a19d9a8997349bdd14d68e 100644 (file)
@@ -32,7 +32,14 @@ EXPORT_SYMBOL(cad_pid);
 #endif
 enum reboot_mode reboot_mode DEFAULT_REBOOT_MODE;
 
-int reboot_default;
+/*
+ * This variable is used privately to keep track of whether or not
+ * reboot_type is still set to its default value (i.e., reboot= hasn't
+ * been set on the command line).  This is needed so that we can
+ * suppress DMI scanning for reboot quirks.  Without it, it's
+ * impossible to override a faulty reboot quirk without recompiling.
+ */
+int reboot_default = 1;
 int reboot_cpu;
 enum reboot_type reboot_type = BOOT_ACPI;
 int reboot_force;
index 11cd13667359862c58872a9ce6091391a1d40b86..7c70201fbc61aef012d5b2536600d898602bab0a 100644 (file)
@@ -4242,7 +4242,7 @@ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
        }
 
        if (!se) {
-               cfs_rq->h_load = rq->avg.load_avg_contrib;
+               cfs_rq->h_load = cfs_rq->runnable_load_avg;
                cfs_rq->last_h_load_update = now;
        }
 
@@ -4823,8 +4823,8 @@ void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
                (busiest->load_per_task * SCHED_POWER_SCALE) /
                busiest->group_power;
 
-       if (busiest->avg_load - local->avg_load + scaled_busy_load_per_task >=
-           (scaled_busy_load_per_task * imbn)) {
+       if (busiest->avg_load + scaled_busy_load_per_task >=
+           local->avg_load + (scaled_busy_load_per_task * imbn)) {
                env->imbalance = busiest->load_per_task;
                return;
        }
@@ -4896,7 +4896,8 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
         * max load less than avg load(as we skip the groups at or below
         * its cpu_power, while calculating max_load..)
         */
-       if (busiest->avg_load < sds->avg_load) {
+       if (busiest->avg_load <= sds->avg_load ||
+           local->avg_load >= sds->avg_load) {
                env->imbalance = 0;
                return fix_small_imbalance(env, sds);
        }
index 51c4f34d258ea397266e0dd1a96a16436415a38f..4431610f049ac77888adefe3a335d47d4d232939 100644 (file)
@@ -486,7 +486,52 @@ static struct smp_hotplug_thread watchdog_threads = {
        .unpark                 = watchdog_enable,
 };
 
-static int watchdog_enable_all_cpus(void)
+static void restart_watchdog_hrtimer(void *info)
+{
+       struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
+       int ret;
+
+       /*
+        * No need to cancel and restart hrtimer if it is currently executing
+        * because it will reprogram itself with the new period now.
+        * We should never see it unqueued here because we are running per-cpu
+        * with interrupts disabled.
+        */
+       ret = hrtimer_try_to_cancel(hrtimer);
+       if (ret == 1)
+               hrtimer_start(hrtimer, ns_to_ktime(sample_period),
+                               HRTIMER_MODE_REL_PINNED);
+}
+
+static void update_timers(int cpu)
+{
+       struct call_single_data data = {.func = restart_watchdog_hrtimer};
+       /*
+        * Make sure that the perf event counter will adopt a new
+        * sampling period. Updating the sampling period directly would
+        * be much nicer but we do not have an API for that now so
+        * let's use a big hammer.
+        * Hrtimer will adopt the new period on the next tick but this
+        * might be late already so we have to restart the timer as well.
+        */
+       watchdog_nmi_disable(cpu);
+       __smp_call_function_single(cpu, &data, 1);
+       watchdog_nmi_enable(cpu);
+}
+
+static void update_timers_all_cpus(void)
+{
+       int cpu;
+
+       get_online_cpus();
+       preempt_disable();
+       for_each_online_cpu(cpu)
+               update_timers(cpu);
+       preempt_enable();
+       put_online_cpus();
+}
+
+static int watchdog_enable_all_cpus(bool sample_period_changed)
 {
        int err = 0;
 
@@ -496,6 +541,8 @@ static int watchdog_enable_all_cpus(void)
                        pr_err("Failed to create watchdog threads, disabled\n");
                else
                        watchdog_running = 1;
+       } else if (sample_period_changed) {
+               update_timers_all_cpus();
        }
 
        return err;
@@ -520,13 +567,15 @@ int proc_dowatchdog(struct ctl_table *table, int write,
                    void __user *buffer, size_t *lenp, loff_t *ppos)
 {
        int err, old_thresh, old_enabled;
+       static DEFINE_MUTEX(watchdog_proc_mutex);
 
+       mutex_lock(&watchdog_proc_mutex);
        old_thresh = ACCESS_ONCE(watchdog_thresh);
        old_enabled = ACCESS_ONCE(watchdog_user_enabled);
 
        err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
        if (err || !write)
-               return err;
+               goto out;
 
        set_sample_period();
        /*
@@ -535,7 +584,7 @@ int proc_dowatchdog(struct ctl_table *table, int write,
         * watchdog_*_all_cpus() function takes care of this.
         */
        if (watchdog_user_enabled && watchdog_thresh)
-               err = watchdog_enable_all_cpus();
+               err = watchdog_enable_all_cpus(old_thresh != watchdog_thresh);
        else
                watchdog_disable_all_cpus();
 
@@ -544,7 +593,8 @@ int proc_dowatchdog(struct ctl_table *table, int write,
                watchdog_thresh = old_thresh;
                watchdog_user_enabled = old_enabled;
        }
-
+out:
+       mutex_unlock(&watchdog_proc_mutex);
        return err;
 }
 #endif /* CONFIG_SYSCTL */
@@ -554,5 +604,5 @@ void __init lockup_detector_init(void)
        set_sample_period();
 
        if (watchdog_user_enabled)
-               watchdog_enable_all_cpus();
+               watchdog_enable_all_cpus(false);
 }
index 3f0494c9d57aaf24cbbea90231e010d1d06f3e04..8499c810909a58ae100b7db96a01ea74b5c14948 100644 (file)
@@ -14,6 +14,8 @@
 
 const char hex_asc[] = "0123456789abcdef";
 EXPORT_SYMBOL(hex_asc);
+const char hex_asc_upper[] = "0123456789ABCDEF";
+EXPORT_SYMBOL(hex_asc_upper);
 
 /**
  * hex_to_bin - convert a hex digit to its real value
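The newly exported upper-case table is indexed the same way as hex_asc: once with the high nibble and once with the low nibble. A minimal sketch of formatting one byte; the helper name is invented for illustration.

static char *demo_pack_byte_upper(char *buf, unsigned char byte)
{
	*buf++ = hex_asc_upper[byte >> 4];	/* high nibble */
	*buf++ = hex_asc_upper[byte & 0x0f];	/* low nibble */
	return buf;
}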
index 962175134702dace0078edfd0ec7f092bc646536..669bf190d4fb91770f8912277278a6528c4065c3 100644 (file)
@@ -933,10 +933,7 @@ const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj)
 
 bool kobj_ns_current_may_mount(enum kobj_ns_type type)
 {
-       bool may_mount = false;
-
-       if (type == KOBJ_NS_TYPE_NONE)
-               return true;
+       bool may_mount = true;
 
        spin_lock(&kobj_ns_type_lock);
        if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) &&
index e2cd2c0a882126c58e04e47102fb4975c5247d3c..6f9d434c1521eab9ca0b2821d10936af55f2b703 100644 (file)
@@ -3,6 +3,22 @@
 
 #ifdef CONFIG_CMPXCHG_LOCKREF
 
+/*
+ * Allow weakly-ordered memory architectures to provide barrier-less
+ * cmpxchg semantics for lockref updates.
+ */
+#ifndef cmpxchg64_relaxed
+# define cmpxchg64_relaxed cmpxchg64
+#endif
+
+/*
+ * Allow architectures to override the default cpu_relax() within CMPXCHG_LOOP.
+ * This is useful for architectures with an expensive cpu_relax().
+ */
+#ifndef arch_mutex_cpu_relax
+# define arch_mutex_cpu_relax() cpu_relax()
+#endif
+
 /*
  * Note that the "cmpxchg()" reloads the "old" value for the
  * failure case.
        while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {     \
                struct lockref new = old, prev = old;                           \
                CODE                                                            \
-               old.lock_count = cmpxchg(&lockref->lock_count,                  \
-                                        old.lock_count, new.lock_count);       \
+               old.lock_count = cmpxchg64_relaxed(&lockref->lock_count,        \
+                                                  old.lock_count,              \
+                                                  new.lock_count);             \
                if (likely(old.lock_count == prev.lock_count)) {                \
                        SUCCESS;                                                \
                }                                                               \
-               cpu_relax();                                                    \
+               arch_mutex_cpu_relax();                                         \
        }                                                                       \
 } while (0)
 
index c9f0a4339a7dafc2ba7295e49ad8fcdda8fa13de..5a7d58fb883bfa1c4917e48d251cd132c8d9baf9 100644 (file)
@@ -204,6 +204,8 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
        struct bio_vec *to, *from;
        unsigned i;
 
+       if (force)
+               goto bounce;
        bio_for_each_segment(from, *bio_orig, i)
                if (page_to_pfn(from->bv_page) > queue_bounce_pfn(q))
                        goto bounce;
index c43789388cd8667536bfd9644a5bbe2cd0d35f3e..b5326b141a251905a96cd5cd995063a29ca20f91 100644 (file)
@@ -677,6 +677,13 @@ static void isolate_freepages(struct zone *zone,
                                        pfn -= pageblock_nr_pages) {
                unsigned long isolated;
 
+               /*
+                * This can iterate a massively long zone without finding any
+                * suitable migration targets, so periodically check if we need
+                * to schedule.
+                */
+               cond_resched();
+
                if (!pfn_valid(pfn))
                        continue;
 
index afc2daa91c609dd8aab70f50fb58fd9100ed8326..4c84678371eb5b5905cc8c4386b512ec57e4f5e3 100644 (file)
@@ -20,8 +20,6 @@ static int hwpoison_inject(void *data, u64 val)
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
 
-       if (!hwpoison_filter_enable)
-               goto inject;
        if (!pfn_valid(pfn))
                return -ENXIO;
 
@@ -33,6 +31,9 @@ static int hwpoison_inject(void *data, u64 val)
        if (!get_page_unless_zero(hpage))
                return 0;
 
+       if (!hwpoison_filter_enable)
+               goto inject;
+
        if (!PageLRU(p) && !PageHuge(p))
                shake_page(p, 0);
        /*
index 6975bc812542d2c13642363d928005f355199c54..539eeb96b323bf649f83783e0dddcb4f907e1d6e 100644 (file)
@@ -343,10 +343,11 @@ static long madvise_remove(struct vm_area_struct *vma,
  */
 static int madvise_hwpoison(int bhv, unsigned long start, unsigned long end)
 {
+       struct page *p;
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
-       for (; start < end; start += PAGE_SIZE) {
-               struct page *p;
+       for (; start < end; start += PAGE_SIZE <<
+                               compound_order(compound_head(p))) {
                int ret;
 
                ret = get_user_pages_fast(start, 1, 0, &p);
index d5ff3ce13029b2c99b4ed402898ae0c76a143fde..1c52ddbc839ba1f8f42e940c51bc321ba6b2abfe 100644 (file)
@@ -39,6 +39,7 @@
 #include <linux/limits.h>
 #include <linux/export.h>
 #include <linux/mutex.h>
+#include <linux/rbtree.h>
 #include <linux/slab.h>
 #include <linux/swap.h>
 #include <linux/swapops.h>
@@ -160,6 +161,10 @@ struct mem_cgroup_per_zone {
 
        struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1];
 
+       struct rb_node          tree_node;      /* RB tree node */
+       unsigned long long      usage_in_excess;/* Set to the value by which */
+                                               /* the soft limit is exceeded*/
+       bool                    on_tree;
        struct mem_cgroup       *memcg;         /* Back pointer, we cannot */
                                                /* use container_of        */
 };
@@ -168,6 +173,26 @@ struct mem_cgroup_per_node {
        struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
 };
 
+/*
+ * Cgroups above their limits are maintained in a RB-Tree, independent of
+ * their hierarchy representation
+ */
+
+struct mem_cgroup_tree_per_zone {
+       struct rb_root rb_root;
+       spinlock_t lock;
+};
+
+struct mem_cgroup_tree_per_node {
+       struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
+};
+
+struct mem_cgroup_tree {
+       struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
+};
+
+static struct mem_cgroup_tree soft_limit_tree __read_mostly;
+
 struct mem_cgroup_threshold {
        struct eventfd_ctx *eventfd;
        u64 threshold;
@@ -303,22 +328,6 @@ struct mem_cgroup {
        atomic_t        numainfo_events;
        atomic_t        numainfo_updating;
 #endif
-       /*
-        * Protects soft_contributed transitions.
-        * See mem_cgroup_update_soft_limit
-        */
-       spinlock_t soft_lock;
-
-       /*
-        * If true then this group has increased parents' children_in_excess
-        * when it got over the soft limit.
-        * When a group falls bellow the soft limit, parents' children_in_excess
-        * is decreased and soft_contributed changed to false.
-        */
-       bool soft_contributed;
-
-       /* Number of children that are in soft limit excess */
-       atomic_t children_in_excess;
 
        struct mem_cgroup_per_node *nodeinfo[0];
        /* WARNING: nodeinfo must be the last member here */
@@ -422,6 +431,7 @@ static bool move_file(void)
  * limit reclaim to prevent infinite loops, if they ever occur.
  */
 #define        MEM_CGROUP_MAX_RECLAIM_LOOPS            100
+#define        MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS 2
 
 enum charge_type {
        MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
@@ -648,6 +658,164 @@ page_cgroup_zoneinfo(struct mem_cgroup *memcg, struct page *page)
        return mem_cgroup_zoneinfo(memcg, nid, zid);
 }
 
+static struct mem_cgroup_tree_per_zone *
+soft_limit_tree_node_zone(int nid, int zid)
+{
+       return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
+}
+
+static struct mem_cgroup_tree_per_zone *
+soft_limit_tree_from_page(struct page *page)
+{
+       int nid = page_to_nid(page);
+       int zid = page_zonenum(page);
+
+       return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
+}
+
+static void
+__mem_cgroup_insert_exceeded(struct mem_cgroup *memcg,
+                               struct mem_cgroup_per_zone *mz,
+                               struct mem_cgroup_tree_per_zone *mctz,
+                               unsigned long long new_usage_in_excess)
+{
+       struct rb_node **p = &mctz->rb_root.rb_node;
+       struct rb_node *parent = NULL;
+       struct mem_cgroup_per_zone *mz_node;
+
+       if (mz->on_tree)
+               return;
+
+       mz->usage_in_excess = new_usage_in_excess;
+       if (!mz->usage_in_excess)
+               return;
+       while (*p) {
+               parent = *p;
+               mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
+                                       tree_node);
+               if (mz->usage_in_excess < mz_node->usage_in_excess)
+                       p = &(*p)->rb_left;
+               /*
+                * We can't avoid mem cgroups that are over their soft
+                * limit by the same amount
+                */
+               else if (mz->usage_in_excess >= mz_node->usage_in_excess)
+                       p = &(*p)->rb_right;
+       }
+       rb_link_node(&mz->tree_node, parent, p);
+       rb_insert_color(&mz->tree_node, &mctz->rb_root);
+       mz->on_tree = true;
+}
+
+static void
+__mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
+                               struct mem_cgroup_per_zone *mz,
+                               struct mem_cgroup_tree_per_zone *mctz)
+{
+       if (!mz->on_tree)
+               return;
+       rb_erase(&mz->tree_node, &mctz->rb_root);
+       mz->on_tree = false;
+}
+
+static void
+mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
+                               struct mem_cgroup_per_zone *mz,
+                               struct mem_cgroup_tree_per_zone *mctz)
+{
+       spin_lock(&mctz->lock);
+       __mem_cgroup_remove_exceeded(memcg, mz, mctz);
+       spin_unlock(&mctz->lock);
+}
+
+
+static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
+{
+       unsigned long long excess;
+       struct mem_cgroup_per_zone *mz;
+       struct mem_cgroup_tree_per_zone *mctz;
+       int nid = page_to_nid(page);
+       int zid = page_zonenum(page);
+       mctz = soft_limit_tree_from_page(page);
+
+       /*
+        * Necessary to update all ancestors when hierarchy is used,
+        * because their event counter is not touched.
+        */
+       for (; memcg; memcg = parent_mem_cgroup(memcg)) {
+               mz = mem_cgroup_zoneinfo(memcg, nid, zid);
+               excess = res_counter_soft_limit_excess(&memcg->res);
+               /*
+                * We have to update the tree if mz is on the RB-tree or
+                * the memcg is over its soft limit.
+                */
+               if (excess || mz->on_tree) {
+                       spin_lock(&mctz->lock);
+                       /* if on-tree, remove it */
+                       if (mz->on_tree)
+                               __mem_cgroup_remove_exceeded(memcg, mz, mctz);
+                       /*
+                        * Insert again. mz->usage_in_excess will be updated.
+                        * If excess is 0, no tree ops.
+                        */
+                       __mem_cgroup_insert_exceeded(memcg, mz, mctz, excess);
+                       spin_unlock(&mctz->lock);
+               }
+       }
+}
+
+static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
+{
+       int node, zone;
+       struct mem_cgroup_per_zone *mz;
+       struct mem_cgroup_tree_per_zone *mctz;
+
+       for_each_node(node) {
+               for (zone = 0; zone < MAX_NR_ZONES; zone++) {
+                       mz = mem_cgroup_zoneinfo(memcg, node, zone);
+                       mctz = soft_limit_tree_node_zone(node, zone);
+                       mem_cgroup_remove_exceeded(memcg, mz, mctz);
+               }
+       }
+}
+
+static struct mem_cgroup_per_zone *
+__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
+{
+       struct rb_node *rightmost = NULL;
+       struct mem_cgroup_per_zone *mz;
+
+retry:
+       mz = NULL;
+       rightmost = rb_last(&mctz->rb_root);
+       if (!rightmost)
+               goto done;              /* Nothing to reclaim from */
+
+       mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
+       /*
+        * Remove the node now but someone else can add it back,
+        * we will add it back at the end of reclaim to its correct
+        * position in the tree.
+        */
+       __mem_cgroup_remove_exceeded(mz->memcg, mz, mctz);
+       if (!res_counter_soft_limit_excess(&mz->memcg->res) ||
+               !css_tryget(&mz->memcg->css))
+               goto retry;
+done:
+       return mz;
+}
+
+static struct mem_cgroup_per_zone *
+mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
+{
+       struct mem_cgroup_per_zone *mz;
+
+       spin_lock(&mctz->lock);
+       mz = __mem_cgroup_largest_soft_limit_node(mctz);
+       spin_unlock(&mctz->lock);
+       return mz;
+}
+
 /*
  * Implementation Note: reading percpu statistics for memcg.
  *
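
For orientation, the soft limit reclaim code added later in this patch consumes the tree by pulling the memcg that is furthest over its limit, i.e. the rightmost node with the largest usage_in_excess. A hedged sketch of that consumer side; pick_soft_limit_victim() is a hypothetical wrapper around the helper defined above:

static struct mem_cgroup *
pick_soft_limit_victim(struct mem_cgroup_tree_per_zone *mctz)
{
	struct mem_cgroup_per_zone *mz;

	/* takes mctz->lock and, on success, returns with a css reference held */
	mz = mem_cgroup_largest_soft_limit_node(mctz);
	return mz ? mz->memcg : NULL;
}
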
@@ -821,48 +989,6 @@ static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
        return false;
 }
 
-/*
- * Called from rate-limited memcg_check_events when enough
- * MEM_CGROUP_TARGET_SOFTLIMIT events are accumulated and it makes sure
- * that all the parents up the hierarchy will be notified that this group
- * is in excess or that it is not in excess anymore. mmecg->soft_contributed
- * makes the transition a single action whenever the state flips from one to
- * the other.
- */
-static void mem_cgroup_update_soft_limit(struct mem_cgroup *memcg)
-{
-       unsigned long long excess = res_counter_soft_limit_excess(&memcg->res);
-       struct mem_cgroup *parent = memcg;
-       int delta = 0;
-
-       spin_lock(&memcg->soft_lock);
-       if (excess) {
-               if (!memcg->soft_contributed) {
-                       delta = 1;
-                       memcg->soft_contributed = true;
-               }
-       } else {
-               if (memcg->soft_contributed) {
-                       delta = -1;
-                       memcg->soft_contributed = false;
-               }
-       }
-
-       /*
-        * Necessary to update all ancestors when hierarchy is used
-        * because their event counter is not touched.
-        * We track children even outside the hierarchy for the root
-        * cgroup because tree walk starting at root should visit
-        * all cgroups and we want to prevent from pointless tree
-        * walk if no children is below the limit.
-        */
-       while (delta && (parent = parent_mem_cgroup(parent)))
-               atomic_add(delta, &parent->children_in_excess);
-       if (memcg != root_mem_cgroup && !root_mem_cgroup->use_hierarchy)
-               atomic_add(delta, &root_mem_cgroup->children_in_excess);
-       spin_unlock(&memcg->soft_lock);
-}
-
 /*
  * Check events in order.
  *
@@ -886,7 +1012,7 @@ static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
 
                mem_cgroup_threshold(memcg);
                if (unlikely(do_softlimit))
-                       mem_cgroup_update_soft_limit(memcg);
+                       mem_cgroup_update_tree(memcg, page);
 #if MAX_NUMNODES > 1
                if (unlikely(do_numainfo))
                        atomic_inc(&memcg->numainfo_events);
@@ -929,15 +1055,6 @@ struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
        return memcg;
 }
 
-static enum mem_cgroup_filter_t
-mem_cgroup_filter(struct mem_cgroup *memcg, struct mem_cgroup *root,
-               mem_cgroup_iter_filter cond)
-{
-       if (!cond)
-               return VISIT;
-       return cond(memcg, root);
-}
-
 /*
  * Returns a next (in a pre-order walk) alive memcg (with elevated css
  * ref. count) or NULL if the whole root's subtree has been visited.
@@ -945,7 +1062,7 @@ mem_cgroup_filter(struct mem_cgroup *memcg, struct mem_cgroup *root,
  * helper function to be used by mem_cgroup_iter
  */
 static struct mem_cgroup *__mem_cgroup_iter_next(struct mem_cgroup *root,
-               struct mem_cgroup *last_visited, mem_cgroup_iter_filter cond)
+               struct mem_cgroup *last_visited)
 {
        struct cgroup_subsys_state *prev_css, *next_css;
 
@@ -963,31 +1080,11 @@ skip_node:
        if (next_css) {
                struct mem_cgroup *mem = mem_cgroup_from_css(next_css);
 
-               switch (mem_cgroup_filter(mem, root, cond)) {
-               case SKIP:
+               if (css_tryget(&mem->css))
+                       return mem;
+               else {
                        prev_css = next_css;
                        goto skip_node;
-               case SKIP_TREE:
-                       if (mem == root)
-                               return NULL;
-                       /*
-                        * css_rightmost_descendant is not an optimal way to
-                        * skip through a subtree (especially for imbalanced
-                        * trees leaning to right) but that's what we have right
-                        * now. More effective solution would be traversing
-                        * right-up for first non-NULL without calling
-                        * css_next_descendant_pre afterwards.
-                        */
-                       prev_css = css_rightmost_descendant(next_css);
-                       goto skip_node;
-               case VISIT:
-                       if (css_tryget(&mem->css))
-                               return mem;
-                       else {
-                               prev_css = next_css;
-                               goto skip_node;
-                       }
-                       break;
                }
        }
 
@@ -1051,7 +1148,6 @@ static void mem_cgroup_iter_update(struct mem_cgroup_reclaim_iter *iter,
  * @root: hierarchy root
  * @prev: previously returned memcg, NULL on first invocation
  * @reclaim: cookie for shared reclaim walks, NULL for full walks
- * @cond: filter for visited nodes, NULL for no filter
  *
  * Returns references to children of the hierarchy below @root, or
  * @root itself, or %NULL after a full round-trip.
@@ -1064,18 +1160,15 @@ static void mem_cgroup_iter_update(struct mem_cgroup_reclaim_iter *iter,
  * divide up the memcgs in the hierarchy among all concurrent
  * reclaimers operating on the same zone and priority.
  */
-struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root,
+struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
                                   struct mem_cgroup *prev,
-                                  struct mem_cgroup_reclaim_cookie *reclaim,
-                                  mem_cgroup_iter_filter cond)
+                                  struct mem_cgroup_reclaim_cookie *reclaim)
 {
        struct mem_cgroup *memcg = NULL;
        struct mem_cgroup *last_visited = NULL;
 
-       if (mem_cgroup_disabled()) {
-               /* first call must return non-NULL, second return NULL */
-               return (struct mem_cgroup *)(unsigned long)!prev;
-       }
+       if (mem_cgroup_disabled())
+               return NULL;
 
        if (!root)
                root = root_mem_cgroup;
@@ -1086,9 +1179,7 @@ struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root,
        if (!root->use_hierarchy && root != root_mem_cgroup) {
                if (prev)
                        goto out_css_put;
-               if (mem_cgroup_filter(root, root, cond) == VISIT)
-                       return root;
-               return NULL;
+               return root;
        }
 
        rcu_read_lock();
@@ -1111,7 +1202,7 @@ struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root,
                        last_visited = mem_cgroup_iter_load(iter, root, &seq);
                }
 
-               memcg = __mem_cgroup_iter_next(root, last_visited, cond);
+               memcg = __mem_cgroup_iter_next(root, last_visited);
 
                if (reclaim) {
                        mem_cgroup_iter_update(iter, last_visited, memcg, seq);
@@ -1122,11 +1213,7 @@ struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root,
                                reclaim->generation = iter->generation;
                }
 
-               /*
-                * We have finished the whole tree walk or no group has been
-                * visited because filter told us to skip the root node.
-                */
-               if (!memcg && (prev || (cond && !last_visited)))
+               if (prev && !memcg)
                        goto out_unlock;
        }
 out_unlock:
@@ -1767,7 +1854,6 @@ static unsigned long mem_cgroup_reclaim(struct mem_cgroup *memcg,
        return total;
 }
 
-#if MAX_NUMNODES > 1
 /**
  * test_mem_cgroup_node_reclaimable
  * @memcg: the target memcg
@@ -1790,6 +1876,7 @@ static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
        return false;
 
 }
+#if MAX_NUMNODES > 1
 
 /*
  * Always updating the nodemask is not very good - even if we have an empty
@@ -1857,50 +1944,104 @@ int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
        return node;
 }
 
+/*
+ * Check all nodes to see whether they contain reclaimable pages.
+ * For a quick scan, we make use of scan_nodes. This will allow us to skip
+ * unused nodes. But scan_nodes is lazily updated and may not contain
+ * enough new information. We need to double check.
+ */
+static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
+{
+       int nid;
+
+       /*
+        * quick check... making use of scan_nodes.
+        * We can skip unused nodes.
+        */
+       if (!nodes_empty(memcg->scan_nodes)) {
+               for (nid = first_node(memcg->scan_nodes);
+                    nid < MAX_NUMNODES;
+                    nid = next_node(nid, memcg->scan_nodes)) {
+
+                       if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
+                               return true;
+               }
+       }
+       /*
+        * Check rest of nodes.
+        */
+       for_each_node_state(nid, N_MEMORY) {
+               if (node_isset(nid, memcg->scan_nodes))
+                       continue;
+               if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
+                       return true;
+       }
+       return false;
+}
+
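+
+The quick pass above open-codes first_node()/next_node(); the same scan can be
+written with the for_each_node_mask() helper. A hedged, equivalent sketch;
+scan_nodes_quick_check() is an illustrative name only:
+
+static bool scan_nodes_quick_check(struct mem_cgroup *memcg, bool noswap)
+{
+	int nid;
+
+	/* same quick pass over the cached scan_nodes mask as above */
+	for_each_node_mask(nid, memcg->scan_nodes)
+		if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
+			return true;
+	return false;
+}
+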
 #else
 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
 {
        return 0;
 }
 
-#endif
-
-/*
- * A group is eligible for the soft limit reclaim under the given root
- * hierarchy if
- *     a) it is over its soft limit
- *     b) any parent up the hierarchy is over its soft limit
- *
- * If the given group doesn't have any children over the limit then it
- * doesn't make any sense to iterate its subtree.
- */
-enum mem_cgroup_filter_t
-mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg,
-               struct mem_cgroup *root)
+static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
 {
-       struct mem_cgroup *parent;
-
-       if (!memcg)
-               memcg = root_mem_cgroup;
-       parent = memcg;
-
-       if (res_counter_soft_limit_excess(&memcg->res))
-               return VISIT;
+       return test_mem_cgroup_node_reclaimable(memcg, 0, noswap);
+}
+#endif
 
-       /*
-        * If any parent up to the root in the hierarchy is over its soft limit
-        * then we have to obey and reclaim from this group as well.
-        */
-       while ((parent = parent_mem_cgroup(parent))) {
-               if (res_counter_soft_limit_excess(&parent->res))
-                       return VISIT;
-               if (parent == root)
+static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
+                                  struct zone *zone,
+                                  gfp_t gfp_mask,
+                                  unsigned long *total_scanned)
+{
+       struct mem_cgroup *victim = NULL;
+       int total = 0;
+       int loop = 0;
+       unsigned long excess;
+       unsigned long nr_scanned;
+       struct mem_cgroup_reclaim_cookie reclaim = {
+               .zone = zone,
+               .priority = 0,
+       };
+
+       excess = res_counter_soft_limit_excess(&root_memcg->res) >> PAGE_SHIFT;
+
+       while (1) {
+               victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
+               if (!victim) {
+                       loop++;
+                       if (loop >= 2) {
+                               /*
+                                * If we have not been able to reclaim
+                                * anything, it might be because there are
+                                * no reclaimable pages under this hierarchy.
+                                */
+                               if (!total)
+                                       break;
+                               /*
+                                * We want to do more targeted reclaim.
+                                * excess >> 2 is not so excessive that we
+                                * reclaim too much, nor so little that we keep
+                                * coming back to reclaim from this cgroup.
+                                */
+                               if (total >= (excess >> 2) ||
+                                       (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
+                                       break;
+                       }
+                       continue;
+               }
+               if (!mem_cgroup_reclaimable(victim, false))
+                       continue;
+               total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false,
+                                                    zone, &nr_scanned);
+               *total_scanned += nr_scanned;
+               if (!res_counter_soft_limit_excess(&root_memcg->res))
                        break;
        }
-
-       if (!atomic_read(&memcg->children_in_excess))
-               return SKIP_TREE;
-       return SKIP;
+       mem_cgroup_iter_break(root_memcg, victim);
+       return total;
 }
 
 static DEFINE_SPINLOCK(memcg_oom_lock);
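
The exit conditions spelled out in the comments above reduce to a small predicate once the iterator returns no victim. A hedged restatement, using the constants and types from this file; soft_reclaim_should_stop() is an illustrative name, not part of the patch:

static bool soft_reclaim_should_stop(int loop, int total, unsigned long excess)
{
	/* keep iterating until at least two full rounds produced no victim */
	if (loop < 2)
		return false;
	/* nothing reclaimed at all: probably nothing reclaimable here */
	if (!total)
		return true;
	/* reclaimed a quarter of the excess, or looped too often: stop */
	return total >= (excess >> 2) || loop > MEM_CGROUP_MAX_RECLAIM_LOOPS;
}
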
@@ -2812,7 +2953,9 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
        unlock_page_cgroup(pc);
 
        /*
-        * "charge_statistics" updated event counter.
+        * "charge_statistics" updated event counter. Then, check it.
+        * Insert the ancestor (and the ancestor's ancestors) into the
+        * soft limit RB-tree if they exceed their soft limit.
         */
        memcg_check_events(memcg, page);
 }
@@ -4647,6 +4790,98 @@ static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
        return ret;
 }
 
+unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
+                                           gfp_t gfp_mask,
+                                           unsigned long *total_scanned)
+{
+       unsigned long nr_reclaimed = 0;
+       struct mem_cgroup_per_zone *mz, *next_mz = NULL;
+       unsigned long reclaimed;
+       int loop = 0;
+       struct mem_cgroup_tree_per_zone *mctz;
+       unsigned long long excess;
+       unsigned long nr_scanned;
+
+       if (order > 0)
+               return 0;
+
+       mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone));
+       /*
+        * This loop can run for a while, especially if mem_cgroups
+        * continuously keep exceeding their soft limit and putting the
+        * system under pressure.
+        */
+       do {
+               if (next_mz)
+                       mz = next_mz;
+               else
+                       mz = mem_cgroup_largest_soft_limit_node(mctz);
+               if (!mz)
+                       break;
+
+               nr_scanned = 0;
+               reclaimed = mem_cgroup_soft_reclaim(mz->memcg, zone,
+                                                   gfp_mask, &nr_scanned);
+               nr_reclaimed += reclaimed;
+               *total_scanned += nr_scanned;
+               spin_lock(&mctz->lock);
+
+               /*
+                * If we failed to reclaim anything from this memory cgroup
+                * it is time to move on to the next cgroup
+                */
+               next_mz = NULL;
+               if (!reclaimed) {
+                       do {
+                               /*
+                                * Loop until we find yet another one.
+                                *
+                                * By the time we get the soft_limit lock
+                                * again, someone might have added the
+                                * group back on the RB tree. Iterate to
+                                * make sure we get a different memcg.
+                                * mem_cgroup_largest_soft_limit_node returns
+                                * NULL if no other cgroup is present on
+                                * the tree
+                                */
+                               next_mz =
+                               __mem_cgroup_largest_soft_limit_node(mctz);
+                               if (next_mz == mz)
+                                       css_put(&next_mz->memcg->css);
+                               else /* next_mz == NULL or other memcg */
+                                       break;
+                       } while (1);
+               }
+               __mem_cgroup_remove_exceeded(mz->memcg, mz, mctz);
+               excess = res_counter_soft_limit_excess(&mz->memcg->res);
+               /*
+                * One school of thought says that we should not add
+                * back the node to the tree if reclaim returns 0.
+                * But our reclaim could return 0 simply because, due
+                * to priority, we are exposing a smaller subset of
+                * memory to reclaim from. Consider this as a longer
+                * term TODO.
+                */
+               /* If excess == 0, no tree ops */
+               __mem_cgroup_insert_exceeded(mz->memcg, mz, mctz, excess);
+               spin_unlock(&mctz->lock);
+               css_put(&mz->memcg->css);
+               loop++;
+               /*
+                * Could not reclaim anything and there are no more
+                * mem cgroups to try or we seem to be looping without
+                * reclaiming anything.
+                */
+               if (!nr_reclaimed &&
+                       (next_mz == NULL ||
+                       loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
+                       break;
+       } while (!nr_reclaimed);
+       if (next_mz)
+               css_put(&next_mz->memcg->css);
+       return nr_reclaimed;
+}
+
 /**
  * mem_cgroup_force_empty_list - clears LRU of a group
  * @memcg: group to clear
@@ -5911,6 +6146,8 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
        for (zone = 0; zone < MAX_NR_ZONES; zone++) {
                mz = &pn->zoneinfo[zone];
                lruvec_init(&mz->lruvec);
+               mz->usage_in_excess = 0;
+               mz->on_tree = false;
                mz->memcg = memcg;
        }
        memcg->nodeinfo[node] = pn;
@@ -5966,6 +6203,7 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
        int node;
        size_t size = memcg_size();
 
+       mem_cgroup_remove_from_trees(memcg);
        free_css_id(&mem_cgroup_subsys, &memcg->css);
 
        for_each_node(node)
@@ -6002,6 +6240,29 @@ struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
 }
 EXPORT_SYMBOL(parent_mem_cgroup);
 
+static void __init mem_cgroup_soft_limit_tree_init(void)
+{
+       struct mem_cgroup_tree_per_node *rtpn;
+       struct mem_cgroup_tree_per_zone *rtpz;
+       int tmp, node, zone;
+
+       for_each_node(node) {
+               tmp = node;
+               if (!node_state(node, N_NORMAL_MEMORY))
+                       tmp = -1;
+               rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
+               BUG_ON(!rtpn);
+
+               soft_limit_tree.rb_tree_per_node[node] = rtpn;
+
+               for (zone = 0; zone < MAX_NR_ZONES; zone++) {
+                       rtpz = &rtpn->rb_tree_per_zone[zone];
+                       rtpz->rb_root = RB_ROOT;
+                       spin_lock_init(&rtpz->lock);
+               }
+       }
+}
+
 static struct cgroup_subsys_state * __ref
 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
 {
@@ -6031,7 +6292,6 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
        mutex_init(&memcg->thresholds_lock);
        spin_lock_init(&memcg->move_lock);
        vmpressure_init(&memcg->vmpressure);
-       spin_lock_init(&memcg->soft_lock);
 
        return &memcg->css;
 
@@ -6109,13 +6369,6 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
 
        mem_cgroup_invalidate_reclaim_iterators(memcg);
        mem_cgroup_reparent_charges(memcg);
-       if (memcg->soft_contributed) {
-               while ((memcg = parent_mem_cgroup(memcg)))
-                       atomic_dec(&memcg->children_in_excess);
-
-               if (memcg != root_mem_cgroup && !root_mem_cgroup->use_hierarchy)
-                       atomic_dec(&root_mem_cgroup->children_in_excess);
-       }
        mem_cgroup_destroy_all_caches(memcg);
        vmpressure_cleanup(&memcg->vmpressure);
 }
@@ -6790,6 +7043,7 @@ static int __init mem_cgroup_init(void)
 {
        hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
        enable_swap_cgroup();
+       mem_cgroup_soft_limit_tree_init();
        memcg_stock_init();
        return 0;
 }
index 947ed5413279261a830eeaeb42d9392aea0f8fa8..bf3351b5115e54915a3d7eaa718d10a9771b2c5f 100644 (file)
@@ -1114,8 +1114,10 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
                         * shake_page could have turned it free.
                         */
                        if (is_free_buddy_page(p)) {
-                               action_result(pfn, "free buddy, 2nd try",
-                                               DELAYED);
+                               if (flags & MF_COUNT_INCREASED)
+                                       action_result(pfn, "free buddy", DELAYED);
+                               else
+                                       action_result(pfn, "free buddy, 2nd try", DELAYED);
                                return 0;
                        }
                        action_result(pfn, "non LRU", IGNORED);
@@ -1349,7 +1351,7 @@ int unpoison_memory(unsigned long pfn)
         * worked by memory_failure() and the page lock is not held yet.
         * In such case, we yield to memory_failure() and make unpoison fail.
         */
-       if (PageTransHuge(page)) {
+       if (!PageHuge(page) && PageTransHuge(page)) {
                pr_info("MCE: Memory failure is now running on %#lx\n", pfn);
                        return 0;
        }
index 9c8d5f59d30bb87e63c9990c085eb646e69b4ed7..a26bccd44ccb0a907662c08135399462bd816b9c 100644 (file)
@@ -107,7 +107,7 @@ void putback_movable_pages(struct list_head *l)
                list_del(&page->lru);
                dec_zone_page_state(page, NR_ISOLATED_ANON +
                                page_is_file_cache(page));
-               if (unlikely(balloon_page_movable(page)))
+               if (unlikely(isolated_balloon_page(page)))
                        balloon_page_putback(page);
                else
                        putback_lru_page(page);
index d63802663242eb6ad13ca2ed065434f24fd7e5d1..d480cd6fc475854259bdd51021d5125dbdfbe479 100644 (file)
@@ -379,10 +379,14 @@ static unsigned long __munlock_pagevec_fill(struct pagevec *pvec,
 
        /*
         * Initialize pte walk starting at the already pinned page where we
-        * are sure that there is a pte.
+        * are sure that there is a pte, as it was pinned under the same
+        * mmap_sem write op.
         */
        pte = get_locked_pte(vma->vm_mm, start, &ptl);
-       end = min(end, pmd_addr_end(start, end));
+       /* Make sure we do not cross the page table boundary */
+       end = pgd_addr_end(start, end);
+       end = pud_addr_end(start, end);
+       end = pmd_addr_end(start, end);
 
        /* The page next to the pinned page is the first we will try to get */
        start += PAGE_SIZE;
@@ -736,6 +740,7 @@ static int do_mlockall(int flags)
 
                /* Ignore errors */
                mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
+               cond_resched();
        }
 out:
        return 0;
index 0ee638f76ebe584cd40d7541bac886b96b03162e..dd886fac451ab6ab7d6a3132fd2587c10538f300 100644 (file)
@@ -6366,10 +6366,6 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
                list_del(&page->lru);
                rmv_page_order(page);
                zone->free_area[order].nr_free--;
-#ifdef CONFIG_HIGHMEM
-               if (PageHighMem(page))
-                       totalhigh_pages -= 1 << order;
-#endif
                for (i = 0; i < (1 << order); i++)
                        SetPageReserved((page+i));
                pfn += (1 << order);
index 8ed1b775bdc9cafe9aaf2ab74e4acf842ffe1a9d..53f2f82f83ae0d16bf19646cdb5b3bce5fc4e4cf 100644 (file)
@@ -48,6 +48,7 @@
 #include <asm/div64.h>
 
 #include <linux/swapops.h>
+#include <linux/balloon_compaction.h>
 
 #include "internal.h"
 
@@ -139,23 +140,11 @@ static bool global_reclaim(struct scan_control *sc)
 {
        return !sc->target_mem_cgroup;
 }
-
-static bool mem_cgroup_should_soft_reclaim(struct scan_control *sc)
-{
-       struct mem_cgroup *root = sc->target_mem_cgroup;
-       return !mem_cgroup_disabled() &&
-               mem_cgroup_soft_reclaim_eligible(root, root) != SKIP_TREE;
-}
 #else
 static bool global_reclaim(struct scan_control *sc)
 {
        return true;
 }
-
-static bool mem_cgroup_should_soft_reclaim(struct scan_control *sc)
-{
-       return false;
-}
 #endif
 
 unsigned long zone_reclaimable_pages(struct zone *zone)
@@ -1125,7 +1114,8 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
        LIST_HEAD(clean_pages);
 
        list_for_each_entry_safe(page, next, page_list, lru) {
-               if (page_is_file_cache(page) && !PageDirty(page)) {
+               if (page_is_file_cache(page) && !PageDirty(page) &&
+                   !isolated_balloon_page(page)) {
                        ClearPageActive(page);
                        list_move(&page->lru, &clean_pages);
                }
@@ -2176,11 +2166,9 @@ static inline bool should_continue_reclaim(struct zone *zone,
        }
 }
 
-static int
-__shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim)
+static void shrink_zone(struct zone *zone, struct scan_control *sc)
 {
        unsigned long nr_reclaimed, nr_scanned;
-       int groups_scanned = 0;
 
        do {
                struct mem_cgroup *root = sc->target_mem_cgroup;
@@ -2188,17 +2176,15 @@ __shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim)
                        .zone = zone,
                        .priority = sc->priority,
                };
-               struct mem_cgroup *memcg = NULL;
-               mem_cgroup_iter_filter filter = (soft_reclaim) ?
-                       mem_cgroup_soft_reclaim_eligible : NULL;
+               struct mem_cgroup *memcg;
 
                nr_reclaimed = sc->nr_reclaimed;
                nr_scanned = sc->nr_scanned;
 
-               while ((memcg = mem_cgroup_iter_cond(root, memcg, &reclaim, filter))) {
+               memcg = mem_cgroup_iter(root, NULL, &reclaim);
+               do {
                        struct lruvec *lruvec;
 
-                       groups_scanned++;
                        lruvec = mem_cgroup_zone_lruvec(zone, memcg);
 
                        shrink_lruvec(lruvec, sc);
@@ -2218,7 +2204,8 @@ __shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim)
                                mem_cgroup_iter_break(root, memcg);
                                break;
                        }
-               }
+                       memcg = mem_cgroup_iter(root, memcg, &reclaim);
+               } while (memcg);
 
                vmpressure(sc->gfp_mask, sc->target_mem_cgroup,
                           sc->nr_scanned - nr_scanned,
@@ -2226,37 +2213,6 @@ __shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim)
 
        } while (should_continue_reclaim(zone, sc->nr_reclaimed - nr_reclaimed,
                                         sc->nr_scanned - nr_scanned, sc));
-
-       return groups_scanned;
-}
-
-
-static void shrink_zone(struct zone *zone, struct scan_control *sc)
-{
-       bool do_soft_reclaim = mem_cgroup_should_soft_reclaim(sc);
-       unsigned long nr_scanned = sc->nr_scanned;
-       int scanned_groups;
-
-       scanned_groups = __shrink_zone(zone, sc, do_soft_reclaim);
-       /*
-        * memcg iterator might race with other reclaimer or start from
-        * a incomplete tree walk so the tree walk in __shrink_zone
-        * might have missed groups that are above the soft limit. Try
-        * another loop to catch up with others. Do it just once to
-        * prevent from reclaim latencies when other reclaimers always
-        * preempt this one.
-        */
-       if (do_soft_reclaim && !scanned_groups)
-               __shrink_zone(zone, sc, do_soft_reclaim);
-
-       /*
-        * No group is over the soft limit or those that are do not have
-        * pages in the zone we are reclaiming so we have to reclaim everybody
-        */
-       if (do_soft_reclaim && (sc->nr_scanned == nr_scanned)) {
-               __shrink_zone(zone, sc, false);
-               return;
-       }
 }
 
 /* Returns true if compaction should go ahead for a high-order request */
@@ -2320,6 +2276,8 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
 {
        struct zoneref *z;
        struct zone *zone;
+       unsigned long nr_soft_reclaimed;
+       unsigned long nr_soft_scanned;
        bool aborted_reclaim = false;
 
        /*
@@ -2359,6 +2317,18 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
                                        continue;
                                }
                        }
+                       /*
+                        * This steals pages from memory cgroups over their soft limit
+                        * and returns the number of reclaimed pages and
+                        * scanned pages. This works for global memory pressure
+                        * and balancing, not for a memcg's limit.
+                        */
+                       nr_soft_scanned = 0;
+                       nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
+                                               sc->order, sc->gfp_mask,
+                                               &nr_soft_scanned);
+                       sc->nr_reclaimed += nr_soft_reclaimed;
+                       sc->nr_scanned += nr_soft_scanned;
                        /* need some check for avoid more shrink_zone() */
                }
 
@@ -2952,6 +2922,8 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 {
        int i;
        int end_zone = 0;       /* Inclusive.  0 = ZONE_DMA */
+       unsigned long nr_soft_reclaimed;
+       unsigned long nr_soft_scanned;
        struct scan_control sc = {
                .gfp_mask = GFP_KERNEL,
                .priority = DEF_PRIORITY,
@@ -3066,6 +3038,15 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 
                        sc.nr_scanned = 0;
 
+                       nr_soft_scanned = 0;
+                       /*
+                        * Call soft limit reclaim before calling shrink_zone.
+                        */
+                       nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
+                                                       order, sc.gfp_mask,
+                                                       &nr_soft_scanned);
+                       sc.nr_reclaimed += nr_soft_reclaimed;
+
                        /*
                         * There should be no need to raise the scanning
                         * priority if enough pages are already being scanned
index 1eb05d80b07bea736e85a389be0d98c8bfcb3d9c..3ed616215870cf73b4d8a516d52e5da04d9472e5 100644 (file)
 static unsigned int mrp_join_time __read_mostly = 200;
 module_param(mrp_join_time, uint, 0644);
 MODULE_PARM_DESC(mrp_join_time, "Join time in ms (default 200ms)");
+
+static unsigned int mrp_periodic_time __read_mostly = 1000;
+module_param(mrp_periodic_time, uint, 0644);
+MODULE_PARM_DESC(mrp_periodic_time, "Periodic time in ms (default 1s)");
+
 MODULE_LICENSE("GPL");
 
 static const u8
@@ -595,6 +600,24 @@ static void mrp_join_timer(unsigned long data)
        mrp_join_timer_arm(app);
 }
 
+static void mrp_periodic_timer_arm(struct mrp_applicant *app)
+{
+       mod_timer(&app->periodic_timer,
+                 jiffies + msecs_to_jiffies(mrp_periodic_time));
+}
+
+static void mrp_periodic_timer(unsigned long data)
+{
+       struct mrp_applicant *app = (struct mrp_applicant *)data;
+
+       spin_lock(&app->lock);
+       mrp_mad_event(app, MRP_EVENT_PERIODIC);
+       mrp_pdu_queue(app);
+       spin_unlock(&app->lock);
+
+       mrp_periodic_timer_arm(app);
+}
+
 static int mrp_pdu_parse_end_mark(struct sk_buff *skb, int *offset)
 {
        __be16 endmark;
@@ -845,6 +868,9 @@ int mrp_init_applicant(struct net_device *dev, struct mrp_application *appl)
        rcu_assign_pointer(dev->mrp_port->applicants[appl->type], app);
        setup_timer(&app->join_timer, mrp_join_timer, (unsigned long)app);
        mrp_join_timer_arm(app);
+       setup_timer(&app->periodic_timer, mrp_periodic_timer,
+                   (unsigned long)app);
+       mrp_periodic_timer_arm(app);
        return 0;
 
 err3:
@@ -870,6 +896,7 @@ void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl)
         * all pending messages before the applicant is gone.
         */
        del_timer_sync(&app->join_timer);
+       del_timer_sync(&app->periodic_timer);
 
        spin_lock_bh(&app->lock);
        mrp_mad_event(app, MRP_EVENT_TX);
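
The periodic timer added in this file follows the usual self-rearming pattern: arm it when the applicant is initialised, re-arm it from its own handler after queuing the PERIODIC event, and delete it synchronously on teardown. A hedged sketch of the pattern in isolation; my_ctx, my_timer_fn and do_periodic_work() are hypothetical names:

struct my_ctx {					/* hypothetical timer owner */
	struct timer_list timer;
};

static void my_timer_fn(unsigned long data)
{
	struct my_ctx *ctx = (struct my_ctx *)data;

	do_periodic_work(ctx);			/* hypothetical periodic action */
	/* re-arm for the next period, as mrp_periodic_timer_arm() does */
	mod_timer(&ctx->timer, jiffies + msecs_to_jiffies(1000));
}

/*
 * Init:     setup_timer(&ctx->timer, my_timer_fn, (unsigned long)ctx);
 *           mod_timer(&ctx->timer, jiffies + msecs_to_jiffies(1000));
 * Teardown: del_timer_sync(&ctx->timer);
 */
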
index 61fc573f1142f707fee7d886e67153a62c91c14a..b3d17d1c49c3fbecdd5701da18c14c9767f7d969 100644 (file)
@@ -98,14 +98,14 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
                vlan_gvrp_request_leave(dev);
 
        vlan_group_set_device(grp, vlan->vlan_proto, vlan_id, NULL);
+
+       netdev_upper_dev_unlink(real_dev, dev);
        /* Because unregister_netdevice_queue() makes sure at least one rcu
         * grace period is respected before device freeing,
         * we dont need to call synchronize_net() here.
         */
        unregister_netdevice_queue(dev, head);
 
-       netdev_upper_dev_unlink(real_dev, dev);
-
        if (grp->nr_vlan_devs == 0) {
                vlan_mvrp_uninit_applicant(real_dev);
                vlan_gvrp_uninit_applicant(real_dev);
@@ -169,13 +169,13 @@ int register_vlan_dev(struct net_device *dev)
        if (err < 0)
                goto out_uninit_mvrp;
 
-       err = netdev_upper_dev_link(real_dev, dev);
-       if (err)
-               goto out_uninit_mvrp;
-
        err = register_netdevice(dev);
        if (err < 0)
-               goto out_upper_dev_unlink;
+               goto out_uninit_mvrp;
+
+       err = netdev_upper_dev_link(real_dev, dev);
+       if (err)
+               goto out_unregister_netdev;
 
        /* Account for reference in struct vlan_dev_priv */
        dev_hold(real_dev);
@@ -191,8 +191,8 @@ int register_vlan_dev(struct net_device *dev)
 
        return 0;
 
-out_upper_dev_unlink:
-       netdev_upper_dev_unlink(real_dev, dev);
+out_unregister_netdev:
+       unregister_netdevice(dev);
 out_uninit_mvrp:
        if (grp->nr_vlan_devs == 0)
                vlan_mvrp_uninit_applicant(real_dev);
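
Note that the reordering above also changes the unwind path: once register_netdevice() has succeeded, a failed upper-dev link is undone with unregister_netdevice() before falling through to the existing MVRP/GVRP cleanup. A hedged restatement of that last-in, first-out ordering, with the labels from the hunk:

	err = register_netdevice(dev);
	if (err < 0)
		goto out_uninit_mvrp;		/* nothing newer to undo yet */

	err = netdev_upper_dev_link(real_dev, dev);
	if (err)
		goto out_unregister_netdev;	/* undo the most recent step first */
	...
out_unregister_netdev:
	unregister_netdevice(dev);
out_uninit_mvrp:
	/* MVRP/GVRP cleanup as before */
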
index 634debab4d54582f04c69a4664b00c7541696a93..fb7356fcfe51e03664d7aed6458eafd3015634e5 100644 (file)
@@ -1146,7 +1146,11 @@ int hci_dev_open(__u16 dev)
                goto done;
        }
 
-       if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
+       /* Check for rfkill but allow the HCI setup stage to proceed
+        * (which in itself doesn't cause any RF activity).
+        */
+       if (test_bit(HCI_RFKILLED, &hdev->dev_flags) &&
+           !test_bit(HCI_SETUP, &hdev->dev_flags)) {
                ret = -ERFKILL;
                goto done;
        }
@@ -1566,10 +1570,13 @@ static int hci_rfkill_set_block(void *data, bool blocked)
 
        BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
 
-       if (!blocked)
-               return 0;
-
-       hci_dev_do_close(hdev);
+       if (blocked) {
+               set_bit(HCI_RFKILLED, &hdev->dev_flags);
+               if (!test_bit(HCI_SETUP, &hdev->dev_flags))
+                       hci_dev_do_close(hdev);
+       } else {
+               clear_bit(HCI_RFKILLED, &hdev->dev_flags);
+       }
 
        return 0;
 }
@@ -1591,9 +1598,13 @@ static void hci_power_on(struct work_struct *work)
                return;
        }
 
-       if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
+       if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
+               clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
+               hci_dev_do_close(hdev);
+       } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
                queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
                                   HCI_AUTO_OFF_TIMEOUT);
+       }
 
        if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
                mgmt_index_added(hdev);
@@ -2209,6 +2220,9 @@ int hci_register_dev(struct hci_dev *hdev)
                }
        }
 
+       if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
+               set_bit(HCI_RFKILLED, &hdev->dev_flags);
+
        set_bit(HCI_SETUP, &hdev->dev_flags);
 
        if (hdev->dev_type != HCI_AMP)
index 94aab73f89d4c9e447d2b65f12f8ec71ee1a66e6..8db3e89fae354aebb67c6ea7172a3e2e1926b66d 100644 (file)
@@ -3557,7 +3557,11 @@ static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
        cp.handle = cpu_to_le16(conn->handle);
 
        if (ltk->authenticated)
-               conn->sec_level = BT_SECURITY_HIGH;
+               conn->pending_sec_level = BT_SECURITY_HIGH;
+       else
+               conn->pending_sec_level = BT_SECURITY_MEDIUM;
+
+       conn->enc_key_size = ltk->enc_size;
 
        hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
 
index b3bb7bca8e606439edbd9f9838bb8021200bb28c..63fa11109a1c391725d5efec2075c1524d9f1a1a 100644 (file)
@@ -3755,6 +3755,13 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
 
        sk = chan->sk;
 
+       /* For certain devices (ex: HID mouse), support for authentication,
+        * pairing and bonding is optional. For such devices, in order to avoid
+        * keeping the ACL alive for too long after L2CAP disconnection, reset the ACL
+        * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
+        */
+       conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
+
        bacpy(&bt_sk(sk)->src, conn->src);
        bacpy(&bt_sk(sk)->dst, conn->dst);
        chan->psm  = psm;
index 6d126faf145fe5107fbd0dfd9b810671c34e1e66..84fcf9fff3ea52e4235b7e478deb487fb075b53c 100644 (file)
@@ -569,7 +569,6 @@ static void rfcomm_dev_data_ready(struct rfcomm_dlc *dlc, struct sk_buff *skb)
 static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err)
 {
        struct rfcomm_dev *dev = dlc->owner;
-       struct tty_struct *tty;
        if (!dev)
                return;
 
@@ -581,38 +580,8 @@ static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err)
                            DPM_ORDER_DEV_AFTER_PARENT);
 
                wake_up_interruptible(&dev->port.open_wait);
-       } else if (dlc->state == BT_CLOSED) {
-               tty = tty_port_tty_get(&dev->port);
-               if (!tty) {
-                       if (test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags)) {
-                               /* Drop DLC lock here to avoid deadlock
-                                * 1. rfcomm_dev_get will take rfcomm_dev_lock
-                                *    but in rfcomm_dev_add there's lock order:
-                                *    rfcomm_dev_lock -> dlc lock
-                                * 2. tty_port_put will deadlock if it's
-                                *    the last reference
-                                *
-                                * FIXME: when we release the lock anything
-                                * could happen to dev, even its destruction
-                                */
-                               rfcomm_dlc_unlock(dlc);
-                               if (rfcomm_dev_get(dev->id) == NULL) {
-                                       rfcomm_dlc_lock(dlc);
-                                       return;
-                               }
-
-                               if (!test_and_set_bit(RFCOMM_TTY_RELEASED,
-                                                     &dev->flags))
-                                       tty_port_put(&dev->port);
-
-                               tty_port_put(&dev->port);
-                               rfcomm_dlc_lock(dlc);
-                       }
-               } else {
-                       tty_hangup(tty);
-                       tty_kref_put(tty);
-               }
-       }
+       } else if (dlc->state == BT_CLOSED)
+               tty_port_tty_hangup(&dev->port, false);
 }
 
 static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig)
index d1c5786306784a7353b845cad60e2d39abec0f72..005d876dd86cc89f8e7cf0f43d369661fcd6834e 100644 (file)
@@ -363,7 +363,7 @@ static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
        skb_reset_mac_header(skb);
        eth = eth_hdr(skb);
 
-       memcpy(eth->h_source, br->dev->dev_addr, 6);
+       memcpy(eth->h_source, br->dev->dev_addr, ETH_ALEN);
        eth->h_dest[0] = 1;
        eth->h_dest[1] = 0;
        eth->h_dest[2] = 0x5e;
@@ -433,7 +433,7 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
        skb_reset_mac_header(skb);
        eth = eth_hdr(skb);
 
-       memcpy(eth->h_source, br->dev->dev_addr, 6);
+       memcpy(eth->h_source, br->dev->dev_addr, ETH_ALEN);
        eth->h_proto = htons(ETH_P_IPV6);
        skb_put(skb, sizeof(*eth));
 
index 8b84c581be3082ea4c8a6a21a362a3077ab17751..3fb3c848affef74249a1fd9ed610aea7d1db5764 100644 (file)
@@ -28,7 +28,7 @@ static bool ebt_mac_wormhash_contains(const struct ebt_mac_wormhash *wh,
        uint32_t cmp[2] = { 0, 0 };
        int key = ((const unsigned char *)mac)[5];
 
-       memcpy(((char *) cmp) + 2, mac, 6);
+       memcpy(((char *) cmp) + 2, mac, ETH_ALEN);
        start = wh->table[key];
        limit = wh->table[key + 1];
        if (ip) {
index 5c713f2239cc6245d230d2e35e243bbee7339761..c25db20a424662600e93bf856aa6a61c81508b8e 100644 (file)
@@ -4373,42 +4373,40 @@ struct netdev_adjacent {
        /* upper master flag, there can only be one master device per list */
        bool master;
 
-       /* indicates that this dev is our first-level lower/upper device */
-       bool neighbour;
-
        /* counter for the number of times this device was added to us */
        u16 ref_nr;
 
+       /* private field for the users */
+       void *private;
+
        struct list_head list;
        struct rcu_head rcu;
 };
 
-static struct netdev_adjacent *__netdev_find_adj(struct net_device *dev,
-                                                struct net_device *adj_dev,
-                                                bool upper)
+static struct netdev_adjacent *__netdev_find_adj_rcu(struct net_device *dev,
+                                                    struct net_device *adj_dev,
+                                                    struct list_head *adj_list)
 {
        struct netdev_adjacent *adj;
-       struct list_head *dev_list;
-
-       dev_list = upper ? &dev->upper_dev_list : &dev->lower_dev_list;
 
-       list_for_each_entry(adj, dev_list, list) {
+       list_for_each_entry_rcu(adj, adj_list, list) {
                if (adj->dev == adj_dev)
                        return adj;
        }
        return NULL;
 }
 
-static inline struct netdev_adjacent *__netdev_find_upper(struct net_device *dev,
-                                                         struct net_device *udev)
+static struct netdev_adjacent *__netdev_find_adj(struct net_device *dev,
+                                                struct net_device *adj_dev,
+                                                struct list_head *adj_list)
 {
-       return __netdev_find_adj(dev, udev, true);
-}
+       struct netdev_adjacent *adj;
 
-static inline struct netdev_adjacent *__netdev_find_lower(struct net_device *dev,
-                                                         struct net_device *ldev)
-{
-       return __netdev_find_adj(dev, ldev, false);
+       list_for_each_entry(adj, adj_list, list) {
+               if (adj->dev == adj_dev)
+                       return adj;
+       }
+       return NULL;
 }
 
 /**
@@ -4425,7 +4423,7 @@ bool netdev_has_upper_dev(struct net_device *dev,
 {
        ASSERT_RTNL();
 
-       return __netdev_find_upper(dev, upper_dev);
+       return __netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper);
 }
 EXPORT_SYMBOL(netdev_has_upper_dev);
 
@@ -4440,7 +4438,7 @@ bool netdev_has_any_upper_dev(struct net_device *dev)
 {
        ASSERT_RTNL();
 
-       return !list_empty(&dev->upper_dev_list);
+       return !list_empty(&dev->all_adj_list.upper);
 }
 EXPORT_SYMBOL(netdev_has_any_upper_dev);
 
@@ -4457,10 +4455,10 @@ struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
 
        ASSERT_RTNL();
 
-       if (list_empty(&dev->upper_dev_list))
+       if (list_empty(&dev->adj_list.upper))
                return NULL;
 
-       upper = list_first_entry(&dev->upper_dev_list,
+       upper = list_first_entry(&dev->adj_list.upper,
                                 struct netdev_adjacent, list);
        if (likely(upper->master))
                return upper->dev;
@@ -4468,15 +4466,26 @@ struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
 }
 EXPORT_SYMBOL(netdev_master_upper_dev_get);
 
-/* netdev_upper_get_next_dev_rcu - Get the next dev from upper list
+void *netdev_adjacent_get_private(struct list_head *adj_list)
+{
+       struct netdev_adjacent *adj;
+
+       adj = list_entry(adj_list, struct netdev_adjacent, list);
+
+       return adj->private;
+}
+EXPORT_SYMBOL(netdev_adjacent_get_private);
+
+/**
+ * netdev_all_upper_get_next_dev_rcu - Get the next dev from upper list
  * @dev: device
  * @iter: list_head ** of the current position
  *
  * Gets the next device from the dev's upper list, starting from iter
  * position. The caller must hold RCU read lock.
  */
-struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
-                                                struct list_head **iter)
+struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
+                                                    struct list_head **iter)
 {
        struct netdev_adjacent *upper;
 
@@ -4484,14 +4493,71 @@ struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
 
        upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
 
-       if (&upper->list == &dev->upper_dev_list)
+       if (&upper->list == &dev->all_adj_list.upper)
                return NULL;
 
        *iter = &upper->list;
 
        return upper->dev;
 }
-EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
+EXPORT_SYMBOL(netdev_all_upper_get_next_dev_rcu);
+
+/**
+ * netdev_lower_get_next_private - Get the next ->private from the
+ *                                lower neighbour list
+ * @dev: device
+ * @iter: list_head ** of the current position
+ *
+ * Gets the next netdev_adjacent->private from the dev's lower neighbour
+ * list, starting from iter position. The caller must either hold the
+ * RTNL lock or its own locking that guarantees that the neighbour lower
+ * list will remain unchanged.
+ */
+void *netdev_lower_get_next_private(struct net_device *dev,
+                                   struct list_head **iter)
+{
+       struct netdev_adjacent *lower;
+
+       lower = list_entry(*iter, struct netdev_adjacent, list);
+
+       if (&lower->list == &dev->adj_list.lower)
+               return NULL;
+
+       if (iter)
+               *iter = lower->list.next;
+
+       return lower->private;
+}
+EXPORT_SYMBOL(netdev_lower_get_next_private);
+
+/**
+ * netdev_lower_get_next_private_rcu - Get the next ->private from the
+ *                                    lower neighbour list, RCU
+ *                                    variant
+ * @dev: device
+ * @iter: list_head ** of the current position
+ *
+ * Gets the next netdev_adjacent->private from the dev's lower neighbour
+ * list, starting from iter position. The caller must hold RCU read lock.
+ */
+void *netdev_lower_get_next_private_rcu(struct net_device *dev,
+                                       struct list_head **iter)
+{
+       struct netdev_adjacent *lower;
+
+       WARN_ON_ONCE(!rcu_read_lock_held());
+
+       lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
+
+       if (&lower->list == &dev->adj_list.lower)
+               return NULL;
+
+       if (iter)
+               *iter = &lower->list;
+
+       return lower->private;
+}
+EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
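
Both iterators above expect a cursor that starts at the list head itself and is advanced on each call until NULL is returned. A hedged usage sketch for the RCU variant; example_walk_lower_privates() and handle_private() are hypothetical:

static void example_walk_lower_privates(struct net_device *dev)
{
	struct list_head *iter = &dev->adj_list.lower;	/* cursor starts at the head */
	void *priv;

	rcu_read_lock();
	while ((priv = netdev_lower_get_next_private_rcu(dev, &iter)))
		handle_private(priv);			/* hypothetical per-entry hook */
	rcu_read_unlock();
}
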
 
 /**
  * netdev_master_upper_dev_get_rcu - Get master upper device
@@ -4504,7 +4570,7 @@ struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
 {
        struct netdev_adjacent *upper;
 
-       upper = list_first_or_null_rcu(&dev->upper_dev_list,
+       upper = list_first_or_null_rcu(&dev->adj_list.upper,
                                       struct netdev_adjacent, list);
        if (upper && likely(upper->master))
                return upper->dev;
@@ -4514,15 +4580,16 @@ EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
 
 static int __netdev_adjacent_dev_insert(struct net_device *dev,
                                        struct net_device *adj_dev,
-                                       bool neighbour, bool master,
-                                       bool upper)
+                                       struct list_head *dev_list,
+                                       void *private, bool master)
 {
        struct netdev_adjacent *adj;
+       char linkname[IFNAMSIZ+7];
+       int ret;
 
-       adj = __netdev_find_adj(dev, adj_dev, upper);
+       adj = __netdev_find_adj(dev, adj_dev, dev_list);
 
        if (adj) {
-               BUG_ON(neighbour);
                adj->ref_nr++;
                return 0;
        }
@@ -4533,124 +4600,178 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev,
 
        adj->dev = adj_dev;
        adj->master = master;
-       adj->neighbour = neighbour;
        adj->ref_nr = 1;
-
+       adj->private = private;
        dev_hold(adj_dev);
-       pr_debug("dev_hold for %s, because of %s link added from %s to %s\n",
-                adj_dev->name, upper ? "upper" : "lower", dev->name,
-                adj_dev->name);
 
-       if (!upper) {
-               list_add_tail_rcu(&adj->list, &dev->lower_dev_list);
-               return 0;
+       pr_debug("dev_hold for %s, because of link added from %s to %s\n",
+                adj_dev->name, dev->name, adj_dev->name);
+
+       if (dev_list == &dev->adj_list.lower) {
+               sprintf(linkname, "lower_%s", adj_dev->name);
+               ret = sysfs_create_link(&(dev->dev.kobj),
+                                       &(adj_dev->dev.kobj), linkname);
+               if (ret)
+                       goto free_adj;
+       } else if (dev_list == &dev->adj_list.upper) {
+               sprintf(linkname, "upper_%s", adj_dev->name);
+               ret = sysfs_create_link(&(dev->dev.kobj),
+                                       &(adj_dev->dev.kobj), linkname);
+               if (ret)
+                       goto free_adj;
        }
 
-       /* Ensure that master upper link is always the first item in list. */
-       if (master)
-               list_add_rcu(&adj->list, &dev->upper_dev_list);
-       else
-               list_add_tail_rcu(&adj->list, &dev->upper_dev_list);
+       /* Ensure that master link is always the first item in list. */
+       if (master) {
+               ret = sysfs_create_link(&(dev->dev.kobj),
+                                       &(adj_dev->dev.kobj), "master");
+               if (ret)
+                       goto remove_symlinks;
+
+               list_add_rcu(&adj->list, dev_list);
+       } else {
+               list_add_tail_rcu(&adj->list, dev_list);
+       }
 
        return 0;
-}
 
-static inline int __netdev_upper_dev_insert(struct net_device *dev,
-                                           struct net_device *udev,
-                                           bool master, bool neighbour)
-{
-       return __netdev_adjacent_dev_insert(dev, udev, neighbour, master,
-                                           true);
-}
+remove_symlinks:
+       if (dev_list == &dev->adj_list.lower) {
+               sprintf(linkname, "lower_%s", adj_dev->name);
+               sysfs_remove_link(&(dev->dev.kobj), linkname);
+       } else if (dev_list == &dev->adj_list.upper) {
+               sprintf(linkname, "upper_%s", adj_dev->name);
+               sysfs_remove_link(&(dev->dev.kobj), linkname);
+       }
 
-static inline int __netdev_lower_dev_insert(struct net_device *dev,
-                                           struct net_device *ldev,
-                                           bool neighbour)
-{
-       return __netdev_adjacent_dev_insert(dev, ldev, neighbour, false,
-                                           false);
+free_adj:
+       kfree(adj);
+
+       return ret;
 }
 
 void __netdev_adjacent_dev_remove(struct net_device *dev,
-                                 struct net_device *adj_dev, bool upper)
+                                 struct net_device *adj_dev,
+                                 struct list_head *dev_list)
 {
        struct netdev_adjacent *adj;
+       char linkname[IFNAMSIZ+7];
 
-       if (upper)
-               adj = __netdev_find_upper(dev, adj_dev);
-       else
-               adj = __netdev_find_lower(dev, adj_dev);
+       adj = __netdev_find_adj(dev, adj_dev, dev_list);
 
-       if (!adj)
+       if (!adj) {
+               pr_err("tried to remove device %s from %s\n",
+                      dev->name, adj_dev->name);
                BUG();
+       }
 
        if (adj->ref_nr > 1) {
+               pr_debug("%s to %s ref_nr-- = %d\n", dev->name, adj_dev->name,
+                        adj->ref_nr-1);
                adj->ref_nr--;
                return;
        }
 
+       if (adj->master)
+               sysfs_remove_link(&(dev->dev.kobj), "master");
+
+       if (dev_list == &dev->adj_list.lower) {
+               sprintf(linkname, "lower_%s", adj_dev->name);
+               sysfs_remove_link(&(dev->dev.kobj), linkname);
+       } else if (dev_list == &dev->adj_list.upper) {
+               sprintf(linkname, "upper_%s", adj_dev->name);
+               sysfs_remove_link(&(dev->dev.kobj), linkname);
+       }
+
        list_del_rcu(&adj->list);
-       pr_debug("dev_put for %s, because of %s link removed from %s to %s\n",
-                adj_dev->name, upper ? "upper" : "lower", dev->name,
-                adj_dev->name);
+       pr_debug("dev_put for %s, because link removed from %s to %s\n",
+                adj_dev->name, dev->name, adj_dev->name);
        dev_put(adj_dev);
        kfree_rcu(adj, rcu);
 }
 
-static inline void __netdev_upper_dev_remove(struct net_device *dev,
-                                            struct net_device *udev)
-{
-       return __netdev_adjacent_dev_remove(dev, udev, true);
-}
-
-static inline void __netdev_lower_dev_remove(struct net_device *dev,
-                                            struct net_device *ldev)
-{
-       return __netdev_adjacent_dev_remove(dev, ldev, false);
-}
-
-int __netdev_adjacent_dev_insert_link(struct net_device *dev,
-                                     struct net_device *upper_dev,
-                                     bool master, bool neighbour)
+int __netdev_adjacent_dev_link_lists(struct net_device *dev,
+                                    struct net_device *upper_dev,
+                                    struct list_head *up_list,
+                                    struct list_head *down_list,
+                                    void *private, bool master)
 {
        int ret;
 
-       ret = __netdev_upper_dev_insert(dev, upper_dev, master, neighbour);
+       ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list, private,
+                                          master);
        if (ret)
                return ret;
 
-       ret = __netdev_lower_dev_insert(upper_dev, dev, neighbour);
+       ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list, private,
+                                          false);
        if (ret) {
-               __netdev_upper_dev_remove(dev, upper_dev);
+               __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
                return ret;
        }
 
        return 0;
 }
 
-static inline int __netdev_adjacent_dev_link(struct net_device *dev,
-                                            struct net_device *udev)
+int __netdev_adjacent_dev_link(struct net_device *dev,
+                              struct net_device *upper_dev)
 {
-       return __netdev_adjacent_dev_insert_link(dev, udev, false, false);
+       return __netdev_adjacent_dev_link_lists(dev, upper_dev,
+                                               &dev->all_adj_list.upper,
+                                               &upper_dev->all_adj_list.lower,
+                                               NULL, false);
 }
 
-static inline int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
-                                                      struct net_device *udev,
-                                                      bool master)
+void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
+                                       struct net_device *upper_dev,
+                                       struct list_head *up_list,
+                                       struct list_head *down_list)
 {
-       return __netdev_adjacent_dev_insert_link(dev, udev, master, true);
+       __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
+       __netdev_adjacent_dev_remove(upper_dev, dev, down_list);
 }
 
 void __netdev_adjacent_dev_unlink(struct net_device *dev,
                                  struct net_device *upper_dev)
 {
-       __netdev_upper_dev_remove(dev, upper_dev);
-       __netdev_lower_dev_remove(upper_dev, dev);
+       __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
+                                          &dev->all_adj_list.upper,
+                                          &upper_dev->all_adj_list.lower);
 }
 
+int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
+                                        struct net_device *upper_dev,
+                                        void *private, bool master)
+{
+       int ret = __netdev_adjacent_dev_link(dev, upper_dev);
+
+       if (ret)
+               return ret;
+
+       ret = __netdev_adjacent_dev_link_lists(dev, upper_dev,
+                                              &dev->adj_list.upper,
+                                              &upper_dev->adj_list.lower,
+                                              private, master);
+       if (ret) {
+               __netdev_adjacent_dev_unlink(dev, upper_dev);
+               return ret;
+       }
+
+       return 0;
+}
+
+void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
+                                           struct net_device *upper_dev)
+{
+       __netdev_adjacent_dev_unlink(dev, upper_dev);
+       __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
+                                          &dev->adj_list.upper,
+                                          &upper_dev->adj_list.lower);
+}
 
 static int __netdev_upper_dev_link(struct net_device *dev,
-                                  struct net_device *upper_dev, bool master)
+                                  struct net_device *upper_dev, bool master,
+                                  void *private)
 {
        struct netdev_adjacent *i, *j, *to_i, *to_j;
        int ret = 0;
@@ -4661,26 +4782,29 @@ static int __netdev_upper_dev_link(struct net_device *dev,
                return -EBUSY;
 
        /* To prevent loops, check if dev is not upper device to upper_dev. */
-       if (__netdev_find_upper(upper_dev, dev))
+       if (__netdev_find_adj(upper_dev, dev, &upper_dev->all_adj_list.upper))
                return -EBUSY;
 
-       if (__netdev_find_upper(dev, upper_dev))
+       if (__netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper))
                return -EEXIST;
 
        if (master && netdev_master_upper_dev_get(dev))
                return -EBUSY;
 
-       ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, master);
+       ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, private,
+                                                  master);
        if (ret)
                return ret;
 
        /* Now that we linked these devs, make all the upper_dev's
-        * upper_dev_list visible to every dev's lower_dev_list and vice
+        * all_adj_list.upper visible to every dev's all_adj_list.lower and vice
         * versa, and don't forget the device itself. All of these
         * links are non-neighbours.
         */
-       list_for_each_entry(i, &dev->lower_dev_list, list) {
-               list_for_each_entry(j, &upper_dev->upper_dev_list, list) {
+       list_for_each_entry(i, &dev->all_adj_list.lower, list) {
+               list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
+                       pr_debug("Interlinking %s with %s, non-neighbour\n",
+                                i->dev->name, j->dev->name);
                        ret = __netdev_adjacent_dev_link(i->dev, j->dev);
                        if (ret)
                                goto rollback_mesh;
@@ -4688,14 +4812,18 @@ static int __netdev_upper_dev_link(struct net_device *dev,
        }
 
        /* add dev to every upper_dev's upper device */
-       list_for_each_entry(i, &upper_dev->upper_dev_list, list) {
+       list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
+               pr_debug("linking %s's upper device %s with %s\n",
+                        upper_dev->name, i->dev->name, dev->name);
                ret = __netdev_adjacent_dev_link(dev, i->dev);
                if (ret)
                        goto rollback_upper_mesh;
        }
 
        /* add upper_dev to every dev's lower device */
-       list_for_each_entry(i, &dev->lower_dev_list, list) {
+       list_for_each_entry(i, &dev->all_adj_list.lower, list) {
+               pr_debug("linking %s's lower device %s with %s\n", dev->name,
+                        i->dev->name, upper_dev->name);
                ret = __netdev_adjacent_dev_link(i->dev, upper_dev);
                if (ret)
                        goto rollback_lower_mesh;
@@ -4706,7 +4834,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
 
 rollback_lower_mesh:
        to_i = i;
-       list_for_each_entry(i, &dev->lower_dev_list, list) {
+       list_for_each_entry(i, &dev->all_adj_list.lower, list) {
                if (i == to_i)
                        break;
                __netdev_adjacent_dev_unlink(i->dev, upper_dev);
@@ -4716,7 +4844,7 @@ rollback_lower_mesh:
 
 rollback_upper_mesh:
        to_i = i;
-       list_for_each_entry(i, &upper_dev->upper_dev_list, list) {
+       list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
                if (i == to_i)
                        break;
                __netdev_adjacent_dev_unlink(dev, i->dev);
@@ -4727,8 +4855,8 @@ rollback_upper_mesh:
 rollback_mesh:
        to_i = i;
        to_j = j;
-       list_for_each_entry(i, &dev->lower_dev_list, list) {
-               list_for_each_entry(j, &upper_dev->upper_dev_list, list) {
+       list_for_each_entry(i, &dev->all_adj_list.lower, list) {
+               list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
                        if (i == to_i && j == to_j)
                                break;
                        __netdev_adjacent_dev_unlink(i->dev, j->dev);
@@ -4737,7 +4865,7 @@ rollback_mesh:
                        break;
        }
 
-       __netdev_adjacent_dev_unlink(dev, upper_dev);
+       __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
 
        return ret;
 }
@@ -4755,7 +4883,7 @@ rollback_mesh:
 int netdev_upper_dev_link(struct net_device *dev,
                          struct net_device *upper_dev)
 {
-       return __netdev_upper_dev_link(dev, upper_dev, false);
+       return __netdev_upper_dev_link(dev, upper_dev, false, NULL);
 }
 EXPORT_SYMBOL(netdev_upper_dev_link);
 
@@ -4773,10 +4901,18 @@ EXPORT_SYMBOL(netdev_upper_dev_link);
 int netdev_master_upper_dev_link(struct net_device *dev,
                                 struct net_device *upper_dev)
 {
-       return __netdev_upper_dev_link(dev, upper_dev, true);
+       return __netdev_upper_dev_link(dev, upper_dev, true, NULL);
 }
 EXPORT_SYMBOL(netdev_master_upper_dev_link);
 
+int netdev_master_upper_dev_link_private(struct net_device *dev,
+                                        struct net_device *upper_dev,
+                                        void *private)
+{
+       return __netdev_upper_dev_link(dev, upper_dev, true, private);
+}
+EXPORT_SYMBOL(netdev_master_upper_dev_link_private);
+
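A hedged illustration, not part of the patch: how a master driver might use the
new netdev_master_upper_dev_link_private() to attach per-link private data when
enslaving a device. The struct example_slave type and the enslave path are
hypothetical.

struct example_slave {
	int id;
};

static int example_enslave(struct net_device *master, struct net_device *slave)
{
	struct example_slave *priv;
	int err;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Caller holds RTNL, as for netdev_master_upper_dev_link(). */
	err = netdev_master_upper_dev_link_private(slave, master, priv);
	if (err)
		kfree(priv);
	return err;
}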
 /**
  * netdev_upper_dev_unlink - Removes a link to upper device
  * @dev: device
@@ -4791,29 +4927,59 @@ void netdev_upper_dev_unlink(struct net_device *dev,
        struct netdev_adjacent *i, *j;
        ASSERT_RTNL();
 
-       __netdev_adjacent_dev_unlink(dev, upper_dev);
+       __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
 
        /* Here is the tricky part. We must remove all dev's lower
         * devices from all upper_dev's upper devices and vice
         * versa, to maintain the graph relationship.
         */
-       list_for_each_entry(i, &dev->lower_dev_list, list)
-               list_for_each_entry(j, &upper_dev->upper_dev_list, list)
+       list_for_each_entry(i, &dev->all_adj_list.lower, list)
+               list_for_each_entry(j, &upper_dev->all_adj_list.upper, list)
                        __netdev_adjacent_dev_unlink(i->dev, j->dev);
 
        /* also remove the device itself from the lower/upper device
         * lists
         */
-       list_for_each_entry(i, &dev->lower_dev_list, list)
+       list_for_each_entry(i, &dev->all_adj_list.lower, list)
                __netdev_adjacent_dev_unlink(i->dev, upper_dev);
 
-       list_for_each_entry(i, &upper_dev->upper_dev_list, list)
+       list_for_each_entry(i, &upper_dev->all_adj_list.upper, list)
                __netdev_adjacent_dev_unlink(dev, i->dev);
 
        call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
 }
 EXPORT_SYMBOL(netdev_upper_dev_unlink);
 
+void *netdev_lower_dev_get_private_rcu(struct net_device *dev,
+                                      struct net_device *lower_dev)
+{
+       struct netdev_adjacent *lower;
+
+       if (!lower_dev)
+               return NULL;
+       lower = __netdev_find_adj_rcu(dev, lower_dev, &dev->adj_list.lower);
+       if (!lower)
+               return NULL;
+
+       return lower->private;
+}
+EXPORT_SYMBOL(netdev_lower_dev_get_private_rcu);
+
+void *netdev_lower_dev_get_private(struct net_device *dev,
+                                  struct net_device *lower_dev)
+{
+       struct netdev_adjacent *lower;
+
+       if (!lower_dev)
+               return NULL;
+       lower = __netdev_find_adj(dev, lower_dev, &dev->adj_list.lower);
+       if (!lower)
+               return NULL;
+
+       return lower->private;
+}
+EXPORT_SYMBOL(netdev_lower_dev_get_private);
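A hedged companion sketch: reading the per-link private pointer back with the
accessors above; the RCU variant is for fast-path lookups, the plain one for
RTNL-protected control paths.

static void *example_slave_priv(struct net_device *master,
				struct net_device *slave)
{
	ASSERT_RTNL();	/* the non-RCU lookup walks adj_list under RTNL */
	return netdev_lower_dev_get_private(master, slave);
}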
+
 static void dev_change_rx_flags(struct net_device *dev, int flags)
 {
        const struct net_device_ops *ops = dev->netdev_ops;
@@ -4822,7 +4988,7 @@ static void dev_change_rx_flags(struct net_device *dev, int flags)
                ops->ndo_change_rx_flags(dev, flags);
 }
 
-static int __dev_set_promiscuity(struct net_device *dev, int inc)
+static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
 {
        unsigned int old_flags = dev->flags;
        kuid_t uid;
@@ -4865,6 +5031,8 @@ static int __dev_set_promiscuity(struct net_device *dev, int inc)
 
                dev_change_rx_flags(dev, IFF_PROMISC);
        }
+       if (notify)
+               __dev_notify_flags(dev, old_flags, IFF_PROMISC);
        return 0;
 }
 
@@ -4884,7 +5052,7 @@ int dev_set_promiscuity(struct net_device *dev, int inc)
        unsigned int old_flags = dev->flags;
        int err;
 
-       err = __dev_set_promiscuity(dev, inc);
+       err = __dev_set_promiscuity(dev, inc, true);
        if (err < 0)
                return err;
        if (dev->flags != old_flags)
@@ -4893,22 +5061,9 @@ int dev_set_promiscuity(struct net_device *dev, int inc)
 }
 EXPORT_SYMBOL(dev_set_promiscuity);
 
-/**
- *     dev_set_allmulti        - update allmulti count on a device
- *     @dev: device
- *     @inc: modifier
- *
- *     Add or remove reception of all multicast frames to a device. While the
- *     count in the device remains above zero the interface remains listening
- *     to all interfaces. Once it hits zero the device reverts back to normal
- *     filtering operation. A negative @inc value is used to drop the counter
- *     when releasing a resource needing all multicasts.
- *     Return 0 if successful or a negative errno code on error.
- */
-
-int dev_set_allmulti(struct net_device *dev, int inc)
+static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
 {
-       unsigned int old_flags = dev->flags;
+       unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
 
        ASSERT_RTNL();
 
@@ -4931,9 +5086,30 @@ int dev_set_allmulti(struct net_device *dev, int inc)
        if (dev->flags ^ old_flags) {
                dev_change_rx_flags(dev, IFF_ALLMULTI);
                dev_set_rx_mode(dev);
+               if (notify)
+                       __dev_notify_flags(dev, old_flags,
+                                          dev->gflags ^ old_gflags);
        }
        return 0;
 }
+
+/**
+ *     dev_set_allmulti        - update allmulti count on a device
+ *     @dev: device
+ *     @inc: modifier
+ *
+ *     Add or remove reception of all multicast frames to a device. While the
+ *     count in the device remains above zero the interface remains listening
+ *     to all multicast frames. Once it hits zero the device reverts back to normal
+ *     filtering operation. A negative @inc value is used to drop the counter
+ *     when releasing a resource needing all multicasts.
+ *     Return 0 if successful or a negative errno code on error.
+ */
+
+int dev_set_allmulti(struct net_device *dev, int inc)
+{
+       return __dev_set_allmulti(dev, inc, true);
+}
 EXPORT_SYMBOL(dev_set_allmulti);
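A hedged sketch of the counted semantics documented above: take a reference on
allmulti reception while a resource needs all multicast frames and drop it on
release; both calls run under RTNL.

static int example_need_allmulti(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_allmulti(dev, 1);		/* first user sets IFF_ALLMULTI */
	rtnl_unlock();
	return err;
}

static void example_done_allmulti(struct net_device *dev)
{
	rtnl_lock();
	dev_set_allmulti(dev, -1);		/* last user clears it again */
	rtnl_unlock();
}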
 
 /*
@@ -4958,10 +5134,10 @@ void __dev_set_rx_mode(struct net_device *dev)
                 * therefore calling __dev_set_promiscuity here is safe.
                 */
                if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
-                       __dev_set_promiscuity(dev, 1);
+                       __dev_set_promiscuity(dev, 1, false);
                        dev->uc_promisc = true;
                } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
-                       __dev_set_promiscuity(dev, -1);
+                       __dev_set_promiscuity(dev, -1, false);
                        dev->uc_promisc = false;
                }
        }
@@ -5050,9 +5226,13 @@ int __dev_change_flags(struct net_device *dev, unsigned int flags)
 
        if ((flags ^ dev->gflags) & IFF_PROMISC) {
                int inc = (flags & IFF_PROMISC) ? 1 : -1;
+               unsigned int old_flags = dev->flags;
 
                dev->gflags ^= IFF_PROMISC;
-               dev_set_promiscuity(dev, inc);
+
+               if (__dev_set_promiscuity(dev, inc, false) >= 0)
+                       if (dev->flags != old_flags)
+                               dev_set_rx_mode(dev);
        }
 
        /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
@@ -5063,16 +5243,20 @@ int __dev_change_flags(struct net_device *dev, unsigned int flags)
                int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
 
                dev->gflags ^= IFF_ALLMULTI;
-               dev_set_allmulti(dev, inc);
+               __dev_set_allmulti(dev, inc, false);
        }
 
        return ret;
 }
 
-void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
+void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
+                       unsigned int gchanges)
 {
        unsigned int changes = dev->flags ^ old_flags;
 
+       if (gchanges)
+               rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges);
+
        if (changes & IFF_UP) {
                if (dev->flags & IFF_UP)
                        call_netdevice_notifiers(NETDEV_UP, dev);
@@ -5101,17 +5285,14 @@ void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
 int dev_change_flags(struct net_device *dev, unsigned int flags)
 {
        int ret;
-       unsigned int changes, old_flags = dev->flags;
+       unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
 
        ret = __dev_change_flags(dev, flags);
        if (ret < 0)
                return ret;
 
-       changes = old_flags ^ dev->flags;
-       if (changes)
-               rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
-
-       __dev_notify_flags(dev, old_flags);
+       changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
+       __dev_notify_flags(dev, old_flags, changes);
        return ret;
 }
 EXPORT_SYMBOL(dev_change_flags);
@@ -5247,10 +5428,12 @@ static int dev_new_index(struct net *net)
 
 /* Delayed registration/unregistration */
 static LIST_HEAD(net_todo_list);
+static DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
 
 static void net_set_todo(struct net_device *dev)
 {
        list_add_tail(&dev->todo_list, &net_todo_list);
+       dev_net(dev)->dev_unreg_count++;
 }
 
 static void rollback_registered_many(struct list_head *head)
@@ -5918,6 +6101,12 @@ void netdev_run_todo(void)
                if (dev->destructor)
                        dev->destructor(dev);
 
+               /* Report a network device has been unregistered */
+               rtnl_lock();
+               dev_net(dev)->dev_unreg_count--;
+               __rtnl_unlock();
+               wake_up(&netdev_unregistering_wq);
+
                /* Free network device */
                kobject_put(&dev->dev.kobj);
        }
@@ -6069,8 +6258,10 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
        INIT_LIST_HEAD(&dev->napi_list);
        INIT_LIST_HEAD(&dev->unreg_list);
        INIT_LIST_HEAD(&dev->link_watch_list);
-       INIT_LIST_HEAD(&dev->upper_dev_list);
-       INIT_LIST_HEAD(&dev->lower_dev_list);
+       INIT_LIST_HEAD(&dev->adj_list.upper);
+       INIT_LIST_HEAD(&dev->adj_list.lower);
+       INIT_LIST_HEAD(&dev->all_adj_list.upper);
+       INIT_LIST_HEAD(&dev->all_adj_list.lower);
        dev->priv_flags = IFF_XMIT_DST_RELEASE;
        setup(dev);
 
@@ -6603,6 +6794,34 @@ static void __net_exit default_device_exit(struct net *net)
        rtnl_unlock();
 }
 
+static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
+{
+       /* Return with the rtnl_lock held when there are no network
+        * devices unregistering in any network namespace in net_list.
+        */
+       struct net *net;
+       bool unregistering;
+       DEFINE_WAIT(wait);
+
+       for (;;) {
+               prepare_to_wait(&netdev_unregistering_wq, &wait,
+                               TASK_UNINTERRUPTIBLE);
+               unregistering = false;
+               rtnl_lock();
+               list_for_each_entry(net, net_list, exit_list) {
+                       if (net->dev_unreg_count > 0) {
+                               unregistering = true;
+                               break;
+                       }
+               }
+               if (!unregistering)
+                       break;
+               __rtnl_unlock();
+               schedule();
+       }
+       finish_wait(&netdev_unregistering_wq, &wait);
+}
+
 static void __net_exit default_device_exit_batch(struct list_head *net_list)
 {
        /* At exit all network devices must be removed from a network
@@ -6614,7 +6833,18 @@ static void __net_exit default_device_exit_batch(struct list_head *net_list)
        struct net *net;
        LIST_HEAD(dev_kill_list);
 
-       rtnl_lock();
+       /* To prevent network device cleanup code from dereferencing
+        * loopback devices or network devices that have been freed,
+        * wait here for all pending unregistrations to complete,
+        * before unregistering the loopback device and allowing the
+        * network namespace to be freed.
+        *
+        * The netdev todo list containing all network device
+        * unregistrations that happen in default_device_exit_batch
+        * will run in the rtnl_unlock() at the end of
+        * default_device_exit_batch.
+        */
+       rtnl_lock_unregistering(net_list);
        list_for_each_entry(net, net_list, exit_list) {
                for_each_netdev_reverse(net, dev) {
                        if (dev->rtnl_link_ops)
index 1929af87b2609650d0b484e7d01dc4a405ba8389..f8e25ac41c6c1b7b2eebadc6ec8d8310c458bfee 100644 (file)
@@ -25,9 +25,35 @@ static void iph_to_flow_copy_addrs(struct flow_keys *flow, const struct iphdr *i
        memcpy(&flow->src, &iph->saddr, sizeof(flow->src) + sizeof(flow->dst));
 }
 
+/**
+ * skb_flow_get_ports - extract the upper layer ports and return them
+ * @skb: buffer to extract the ports from
+ * @thoff: transport header offset
+ * @ip_proto: protocol for which to get port offset
+ *
+ * The function will try to retrieve the ports at offset thoff + poff, where
+ * poff is the protocol port offset returned by proto_ports_offset().
+ */
+__be32 skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto)
+{
+       int poff = proto_ports_offset(ip_proto);
+
+       if (poff >= 0) {
+               __be32 *ports, _ports;
+
+               ports = skb_header_pointer(skb, thoff + poff,
+                                          sizeof(_ports), &_ports);
+               if (ports)
+                       return *ports;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(skb_flow_get_ports);
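A hedged sketch, not part of the patch: a caller combining skb_flow_dissect()
with the helper factored out above (the dissector now makes the same call
internally, so this is purely illustrative).

static __be32 example_flow_ports(const struct sk_buff *skb)
{
	struct flow_keys keys;

	if (!skb_flow_dissect(skb, &keys))
		return 0;

	/* Source and destination ports back to back, as read off the
	 * transport header, or 0 if the protocol has no ports. */
	return skb_flow_get_ports(skb, keys.thoff, keys.ip_proto);
}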
+
 bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow)
 {
-       int poff, nhoff = skb_network_offset(skb);
+       int nhoff = skb_network_offset(skb);
        u8 ip_proto;
        __be16 proto = skb->protocol;
 
@@ -150,16 +176,7 @@ ipv6:
        }
 
        flow->ip_proto = ip_proto;
-       poff = proto_ports_offset(ip_proto);
-       if (poff >= 0) {
-               __be32 *ports, _ports;
-
-               nhoff += poff;
-               ports = skb_header_pointer(skb, nhoff, sizeof(_ports), &_ports);
-               if (ports)
-                       flow->ports = *ports;
-       }
-
+       flow->ports = skb_flow_get_ports(skb, nhoff, ip_proto);
        flow->thoff = (u16) nhoff;
 
        return true;
index 6072610a8672d1a54a0e7618214f70610fa2a6fa..ca15f32821fb8d536586354108488050b7cc6a0e 100644 (file)
@@ -867,7 +867,7 @@ static void neigh_invalidate(struct neighbour *neigh)
 static void neigh_probe(struct neighbour *neigh)
        __releases(neigh->lock)
 {
-       struct sk_buff *skb = skb_peek(&neigh->arp_queue);
+       struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
        /* keep skb alive even if arp_queue overflows */
        if (skb)
                skb = skb_copy(skb, GFP_ATOMIC);
index 2a0e21de3060cddbc9cd657acb24ee84b748c4d1..4aedf03da0521433f16b470f3e675adcd4efdf25 100644 (file)
@@ -1647,9 +1647,8 @@ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
        }
 
        dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
-       rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
 
-       __dev_notify_flags(dev, old_flags);
+       __dev_notify_flags(dev, old_flags, ~0U);
        return 0;
 }
 EXPORT_SYMBOL(rtnl_configure_link);
index 6a2f13cee86a0c3f34f69832d4b6d93592fd5327..3f1ec1586ae174d9bea18de2bcada31c3c88a5bb 100644 (file)
 
 #include <net/secure_seq.h>
 
-static u32 net_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned;
+#define NET_SECRET_SIZE (MD5_MESSAGE_BYTES / 4)
 
-void net_secret_init(void)
+static u32 net_secret[NET_SECRET_SIZE] ____cacheline_aligned;
+
+static void net_secret_init(void)
 {
-       get_random_bytes(net_secret, sizeof(net_secret));
+       u32 tmp;
+       int i;
+
+       if (likely(net_secret[0]))
+               return;
+
+       for (i = NET_SECRET_SIZE; i > 0;) {
+               do {
+                       get_random_bytes(&tmp, sizeof(tmp));
+               } while (!tmp);
+               cmpxchg(&net_secret[--i], 0, tmp);
+       }
 }
 
 #ifdef CONFIG_INET
@@ -42,6 +55,7 @@ __u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
        u32 hash[MD5_DIGEST_WORDS];
        u32 i;
 
+       net_secret_init();
        memcpy(hash, saddr, 16);
        for (i = 0; i < 4; i++)
                secret[i] = net_secret[i] + (__force u32)daddr[i];
@@ -63,6 +77,7 @@ u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
        u32 hash[MD5_DIGEST_WORDS];
        u32 i;
 
+       net_secret_init();
        memcpy(hash, saddr, 16);
        for (i = 0; i < 4; i++)
                secret[i] = net_secret[i] + (__force u32) daddr[i];
@@ -82,6 +97,7 @@ __u32 secure_ip_id(__be32 daddr)
 {
        u32 hash[MD5_DIGEST_WORDS];
 
+       net_secret_init();
        hash[0] = (__force __u32) daddr;
        hash[1] = net_secret[13];
        hash[2] = net_secret[14];
@@ -96,6 +112,7 @@ __u32 secure_ipv6_id(const __be32 daddr[4])
 {
        __u32 hash[4];
 
+       net_secret_init();
        memcpy(hash, daddr, 16);
        md5_transform(hash, net_secret);
 
@@ -107,6 +124,7 @@ __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
 {
        u32 hash[MD5_DIGEST_WORDS];
 
+       net_secret_init();
        hash[0] = (__force u32)saddr;
        hash[1] = (__force u32)daddr;
        hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
@@ -121,6 +139,7 @@ u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
 {
        u32 hash[MD5_DIGEST_WORDS];
 
+       net_secret_init();
        hash[0] = (__force u32)saddr;
        hash[1] = (__force u32)daddr;
        hash[2] = (__force u32)dport ^ net_secret[14];
@@ -140,6 +159,7 @@ u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
        u32 hash[MD5_DIGEST_WORDS];
        u64 seq;
 
+       net_secret_init();
        hash[0] = (__force u32)saddr;
        hash[1] = (__force u32)daddr;
        hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
@@ -164,6 +184,7 @@ u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
        u64 seq;
        u32 i;
 
+       net_secret_init();
        memcpy(hash, saddr, 16);
        for (i = 0; i < 4; i++)
                secret[i] = net_secret[i] + daddr[i];
index 5b6beba494a350cb28adfc7724487f1a13e6c011..2bd9b3faa0d0ee1c970c1dcfbf4549e424f0078e 100644 (file)
@@ -914,6 +914,13 @@ set_rcvbuf:
                }
                break;
 #endif
+
+       case SO_MAX_PACING_RATE:
+               sk->sk_max_pacing_rate = val;
+               sk->sk_pacing_rate = min(sk->sk_pacing_rate,
+                                        sk->sk_max_pacing_rate);
+               break;
+
        default:
                ret = -ENOPROTOOPT;
                break;
@@ -1177,6 +1184,10 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
                break;
 #endif
 
+       case SO_MAX_PACING_RATE:
+               v.val = sk->sk_max_pacing_rate;
+               break;
+
        default:
                return -ENOPROTOOPT;
        }
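A hedged userspace sketch for the new socket option: cap a socket's pacing rate
in bytes per second. The fallback numeric value (47, as in asm-generic headers)
is an assumption for toolchains that predate the option.

#include <sys/socket.h>

#ifndef SO_MAX_PACING_RATE
#define SO_MAX_PACING_RATE 47	/* asm-generic value; assumed for old headers */
#endif

static int example_cap_pacing(int fd, unsigned int bytes_per_sec)
{
	return setsockopt(fd, SOL_SOCKET, SO_MAX_PACING_RATE,
			  &bytes_per_sec, sizeof(bytes_per_sec));
}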
@@ -2319,6 +2330,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
        sk->sk_ll_usec          =       sysctl_net_busy_read;
 #endif
 
+       sk->sk_max_pacing_rate = ~0U;
        /*
         * Before updating sk_refcnt, we must commit prior changes to memory
         * (Documentation/RCU/rculist_nulls.txt for details)
index 9bd3c31c30e5f6f0cf6c9195586cd26787b539bd..8f032bae60ad8fdc8e284c5cd5ae7501660fcf50 100644 (file)
@@ -169,20 +169,9 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
                else
                        skb->pkt_type = PACKET_MULTICAST;
        }
-
-       /*
-        *      This ALLMULTI check should be redundant by 1.4
-        *      so don't forget to remove it.
-        *
-        *      Seems, you forgot to remove it. All silly devices
-        *      seems to set IFF_PROMISC.
-        */
-
-       else if (1 /*dev->flags&IFF_PROMISC */) {
-               if (unlikely(!ether_addr_equal_64bits(eth->h_dest,
-                                                     dev->dev_addr)))
-                       skb->pkt_type = PACKET_OTHERHOST;
-       }
+       else if (unlikely(!ether_addr_equal_64bits(eth->h_dest,
+                                                  dev->dev_addr)))
+               skb->pkt_type = PACKET_OTHERHOST;
 
        /*
         * Some variants of DSA tagging don't have an ethertype field
@@ -190,12 +179,13 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
         * variants has been configured on the receiving interface,
         * and if so, set skb->protocol without looking at the packet.
         */
-       if (netdev_uses_dsa_tags(dev))
+       if (unlikely(netdev_uses_dsa_tags(dev)))
                return htons(ETH_P_DSA);
-       if (netdev_uses_trailer_tags(dev))
+
+       if (unlikely(netdev_uses_trailer_tags(dev)))
                return htons(ETH_P_TRAILER);
 
-       if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
+       if (likely(ntohs(eth->h_proto) >= ETH_P_802_3_MIN))
                return eth->h_proto;
 
        /*
@@ -204,7 +194,7 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
         *      layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
         *      won't work for fault tolerant netware but does for the rest.
         */
-       if (skb->len >= 2 && *(unsigned short *)(skb->data) == 0xFFFF)
+       if (unlikely(skb->len >= 2 && *(unsigned short *)(skb->data) == 0xFFFF))
                return htons(ETH_P_802_3);
 
        /*
index 7a1874b7b8fd4431b6cc7d383ae5e96923dbdda3..cfeb85cff4f02abc28570b267ba6e64784595fab 100644 (file)
@@ -263,10 +263,8 @@ void build_ehash_secret(void)
                get_random_bytes(&rnd, sizeof(rnd));
        } while (rnd == 0);
 
-       if (cmpxchg(&inet_ehash_secret, 0, rnd) == 0) {
+       if (cmpxchg(&inet_ehash_secret, 0, rnd) == 0)
                get_random_bytes(&ipv6_hash_secret, sizeof(ipv6_hash_secret));
-               net_secret_init();
-       }
 }
 EXPORT_SYMBOL(build_ehash_secret);
 
index 3df6d3edb2a15a98cb0e90a4e5ed935f42f15f1c..45c74ba039709681e48a219163a74aec737995f5 100644 (file)
@@ -762,12 +762,9 @@ static struct tnode *inflate(struct trie *t, struct tnode *tn)
 
                if (IS_LEAF(node) || ((struct tnode *) node)->pos >
                   tn->pos + tn->bits - 1) {
-                       if (tkey_extract_bits(node->key,
-                                             oldtnode->pos + oldtnode->bits,
-                                             1) == 0)
-                               put_child(tn, 2*i, node);
-                       else
-                               put_child(tn, 2*i+1, node);
+                       put_child(tn,
+                               tkey_extract_bits(node->key, oldtnode->pos, oldtnode->bits + 1),
+                               node);
                        continue;
                }
 
index 5f7d11a458713f9c755dd1a1a40289b180a3e041..5c0e8bc6e5ba275d2469336533b5848de94ae6c1 100644 (file)
@@ -353,6 +353,9 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
        saddr = fib_compute_spec_dst(skb);
        ipc.opt = NULL;
        ipc.tx_flags = 0;
+       ipc.ttl = 0;
+       ipc.tos = -1;
+
        if (icmp_param->replyopts.opt.opt.optlen) {
                ipc.opt = &icmp_param->replyopts.opt;
                if (ipc.opt->opt.srr)
@@ -608,6 +611,8 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
        ipc.addr = iph->saddr;
        ipc.opt = &icmp_param->replyopts.opt;
        ipc.tx_flags = 0;
+       ipc.ttl = 0;
+       ipc.tos = -1;
 
        rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr, tos,
                               type, code, icmp_param);
index dace87f06e5f9bf22ed99d078d058ea8f1339f4a..7defdc9ba16744fd263c539c37f125fc31422c87 100644 (file)
@@ -736,7 +736,7 @@ static void igmp_gq_timer_expire(unsigned long data)
 
        in_dev->mr_gq_running = 0;
        igmpv3_send_report(in_dev, NULL);
-       __in_dev_put(in_dev);
+       in_dev_put(in_dev);
 }
 
 static void igmp_ifc_timer_expire(unsigned long data)
@@ -749,7 +749,7 @@ static void igmp_ifc_timer_expire(unsigned long data)
                igmp_ifc_start_timer(in_dev,
                                     unsolicited_report_interval(in_dev));
        }
-       __in_dev_put(in_dev);
+       in_dev_put(in_dev);
 }
 
 static void igmp_ifc_event(struct in_device *in_dev)
index 6acb541c90910204f02449e7500138362da6998a..56e82a4027b44399b31c3fcaa3a9304af546b98f 100644 (file)
@@ -29,27 +29,19 @@ const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n";
 EXPORT_SYMBOL(inet_csk_timer_bug_msg);
 #endif
 
-/*
- * This struct holds the first and last local port number.
- */
-struct local_ports sysctl_local_ports __read_mostly = {
-       .lock = __SEQLOCK_UNLOCKED(sysctl_local_ports.lock),
-       .range = { 32768, 61000 },
-};
-
 unsigned long *sysctl_local_reserved_ports;
 EXPORT_SYMBOL(sysctl_local_reserved_ports);
 
-void inet_get_local_port_range(int *low, int *high)
+void inet_get_local_port_range(struct net *net, int *low, int *high)
 {
        unsigned int seq;
 
        do {
-               seq = read_seqbegin(&sysctl_local_ports.lock);
+               seq = read_seqbegin(&net->ipv4.sysctl_local_ports.lock);
 
-               *low = sysctl_local_ports.range[0];
-               *high = sysctl_local_ports.range[1];
-       } while (read_seqretry(&sysctl_local_ports.lock, seq));
+               *low = net->ipv4.sysctl_local_ports.range[0];
+               *high = net->ipv4.sysctl_local_ports.range[1];
+       } while (read_seqretry(&net->ipv4.sysctl_local_ports.lock, seq));
 }
 EXPORT_SYMBOL(inet_get_local_port_range);
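A hedged sketch: with the port range now per network namespace, callers pass
the socket's netns explicitly.

static void example_port_range(const struct sock *sk)
{
	int low, high;

	inet_get_local_port_range(sock_net(sk), &low, &high);
	pr_debug("ephemeral ports in this netns: %d-%d\n", low, high);
}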
 
@@ -79,17 +71,16 @@ int inet_csk_bind_conflict(const struct sock *sk,
                            (!reuseport || !sk2->sk_reuseport ||
                            (sk2->sk_state != TCP_TIME_WAIT &&
                             !uid_eq(uid, sock_i_uid(sk2))))) {
-                               const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
-                               if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
-                                   sk2_rcv_saddr == sk_rcv_saddr(sk))
+
+                               if (!sk2->sk_rcv_saddr || !sk->sk_rcv_saddr ||
+                                   sk2->sk_rcv_saddr == sk->sk_rcv_saddr)
                                        break;
                        }
                        if (!relax && reuse && sk2->sk_reuse &&
                            sk2->sk_state != TCP_LISTEN) {
-                               const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
 
-                               if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
-                                   sk2_rcv_saddr == sk_rcv_saddr(sk))
+                               if (!sk2->sk_rcv_saddr || !sk->sk_rcv_saddr ||
+                                   sk2->sk_rcv_saddr == sk->sk_rcv_saddr)
                                        break;
                        }
                }
@@ -116,7 +107,7 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum)
                int remaining, rover, low, high;
 
 again:
-               inet_get_local_port_range(&low, &high);
+               inet_get_local_port_range(net, &low, &high);
                remaining = (high - low) + 1;
                smallest_rover = rover = net_random() % remaining + low;
 
index 5f648751fce2d03418f4a6425c7ba0dbfb3ddfc0..22000279efc89685ea0aab1128ec8cc990e4a784 100644 (file)
@@ -222,7 +222,7 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
                               u32 portid, u32 seq, u16 nlmsg_flags,
                               const struct nlmsghdr *unlh)
 {
-       long tmo;
+       s32 tmo;
        struct inet_diag_msg *r;
        struct nlmsghdr *nlh;
 
@@ -234,7 +234,7 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
        r = nlmsg_data(nlh);
        BUG_ON(tw->tw_state != TCP_TIME_WAIT);
 
-       tmo = tw->tw_ttd - jiffies;
+       tmo = tw->tw_ttd - inet_tw_time_stamp();
        if (tmo < 0)
                tmo = 0;
 
@@ -248,7 +248,7 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
        r->id.idiag_dst[0]    = tw->tw_daddr;
        r->idiag_state        = tw->tw_substate;
        r->idiag_timer        = 3;
-       r->idiag_expires      = DIV_ROUND_UP(tmo * 1000, HZ);
+       r->idiag_expires      = jiffies_to_msecs(tmo);
        r->idiag_rqueue       = 0;
        r->idiag_wqueue       = 0;
        r->idiag_uid          = 0;
index 7bd8983dbfcf308e61dc55bb9491b3dd6866fa35..2779037bd1135378c293190b448361b7b6fe55a3 100644 (file)
@@ -494,7 +494,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
                u32 offset = hint + port_offset;
                struct inet_timewait_sock *tw = NULL;
 
-               inet_get_local_port_range(&low, &high);
+               inet_get_local_port_range(net, &low, &high);
                remaining = (high - low) + 1;
 
                local_bh_disable();
index 1f27c9f4afd07fbf55589ed4e6e730375dcdc0a3..9bcd8f7234ec4755235e7940377119a1c832c80d 100644 (file)
@@ -387,11 +387,11 @@ void inet_twsk_schedule(struct inet_timewait_sock *tw,
                        if (slot >= INET_TWDR_TWKILL_SLOTS)
                                slot = INET_TWDR_TWKILL_SLOTS - 1;
                }
-               tw->tw_ttd = jiffies + timeo;
+               tw->tw_ttd = inet_tw_time_stamp() + timeo;
                slot = (twdr->slot + slot) & (INET_TWDR_TWKILL_SLOTS - 1);
                list = &twdr->cells[slot];
        } else {
-               tw->tw_ttd = jiffies + (slot << INET_TWDR_RECYCLE_TICK);
+               tw->tw_ttd = inet_tw_time_stamp() + (slot << INET_TWDR_RECYCLE_TICK);
 
                if (twdr->twcal_hand < 0) {
                        twdr->twcal_hand = 0;
index a04d872c54f919c7133e7830773301cdf070f3ed..7d8357bb2ba654a88fbf564897beb88e412f4974 100644 (file)
@@ -1060,6 +1060,9 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
                         rt->dst.dev->mtu : dst_mtu(&rt->dst);
        cork->dst = &rt->dst;
        cork->length = 0;
+       cork->ttl = ipc->ttl;
+       cork->tos = ipc->tos;
+       cork->priority = ipc->priority;
        cork->tx_flags = ipc->tx_flags;
 
        return 0;
@@ -1311,7 +1314,9 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
        if (cork->flags & IPCORK_OPT)
                opt = cork->opt;
 
-       if (rt->rt_type == RTN_MULTICAST)
+       if (cork->ttl != 0)
+               ttl = cork->ttl;
+       else if (rt->rt_type == RTN_MULTICAST)
                ttl = inet->mc_ttl;
        else
                ttl = ip_select_ttl(inet, &rt->dst);
@@ -1319,7 +1324,7 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
        iph = ip_hdr(skb);
        iph->version = 4;
        iph->ihl = 5;
-       iph->tos = inet->tos;
+       iph->tos = (cork->tos != -1) ? cork->tos : inet->tos;
        iph->frag_off = df;
        iph->ttl = ttl;
        iph->protocol = sk->sk_protocol;
@@ -1331,7 +1336,7 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
                ip_options_build(skb, opt, cork->addr, rt, 0);
        }
 
-       skb->priority = sk->sk_priority;
+       skb->priority = (cork->tos != -1) ? cork->priority: sk->sk_priority;
        skb->mark = sk->sk_mark;
        /*
         * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
@@ -1481,6 +1486,8 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
        ipc.addr = daddr;
        ipc.opt = NULL;
        ipc.tx_flags = 0;
+       ipc.ttl = 0;
+       ipc.tos = -1;
 
        if (replyopts.opt.opt.optlen) {
                ipc.opt = &replyopts.opt;
index d9c4f113d7093bba7eba2beefc31cd0af4b9bb95..56e34457ac070922ba7467f8c4fcba94d22827d7 100644 (file)
@@ -189,7 +189,7 @@ EXPORT_SYMBOL(ip_cmsg_recv);
 
 int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc)
 {
-       int err;
+       int err, val;
        struct cmsghdr *cmsg;
 
        for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
@@ -215,6 +215,24 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc)
                        ipc->addr = info->ipi_spec_dst.s_addr;
                        break;
                }
+               case IP_TTL:
+                       if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
+                               return -EINVAL;
+                       val = *(int *)CMSG_DATA(cmsg);
+                       if (val < 1 || val > 255)
+                               return -EINVAL;
+                       ipc->ttl = val;
+                       break;
+               case IP_TOS:
+                       if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
+                               return -EINVAL;
+                       val = *(int *)CMSG_DATA(cmsg);
+                       if (val < 0 || val > 255)
+                               return -EINVAL;
+                       ipc->tos = val;
+                       ipc->priority = rt_tos2priority(ipc->tos);
+                       break;
+
                default:
                        return -EINVAL;
                }
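A hedged userspace sketch for the new per-packet controls: send one datagram
with a non-default TTL via IP_TTL ancillary data (IP_TOS works the same way
with a 0-255 value). The helper name is hypothetical.

#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

static ssize_t example_send_with_ttl(int fd, const void *buf, size_t len,
				     const struct sockaddr_in *dst, int ttl)
{
	union {
		char buf[CMSG_SPACE(sizeof(int))];
		struct cmsghdr align;
	} u;
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
	struct msghdr msg = {
		.msg_name	= (void *)dst,
		.msg_namelen	= sizeof(*dst),
		.msg_iov	= &iov,
		.msg_iovlen	= 1,
		.msg_control	= u.buf,
		.msg_controllen	= sizeof(u.buf),
	};
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

	cmsg->cmsg_level = IPPROTO_IP;		/* SOL_IP */
	cmsg->cmsg_type	 = IP_TTL;		/* 1..255, checked by the kernel */
	cmsg->cmsg_len	 = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cmsg), &ttl, sizeof(int));

	return sendmsg(fd, &msg, 0);
}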
index ac9fabe0300f613e87bbe4d70e9c861fbc95889e..63a6d6d6b87581d3ac3bda52cab5833f3cb169ab 100644 (file)
@@ -623,6 +623,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
                        tunnel->err_count = 0;
        }
 
+       tos = ip_tunnel_ecn_encap(tos, inner_iph, skb);
        ttl = tnl_params->ttl;
        if (ttl == 0) {
                if (skb->protocol == htons(ETH_P_IP))
@@ -641,18 +642,17 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
 
        max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr)
                        + rt->dst.header_len;
-       if (max_headroom > dev->needed_headroom) {
+       if (max_headroom > dev->needed_headroom)
                dev->needed_headroom = max_headroom;
-               if (skb_cow_head(skb, dev->needed_headroom)) {
-                       dev->stats.tx_dropped++;
-                       dev_kfree_skb(skb);
-                       return;
-               }
+
+       if (skb_cow_head(skb, dev->needed_headroom)) {
+               dev->stats.tx_dropped++;
+               dev_kfree_skb(skb);
+               return;
        }
 
        err = iptunnel_xmit(rt, skb, fl4.saddr, fl4.daddr, protocol,
-                           ip_tunnel_ecn_encap(tos, inner_iph, skb), ttl, df,
-                           !net_eq(tunnel->net, dev_net(dev)));
+                           tos, ttl, df, !net_eq(tunnel->net, dev_net(dev)));
        iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
 
        return;
@@ -853,8 +853,10 @@ int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
        /* FB netdevice is special: we have one, and only one per netns.
         * Allowing to move it to another netns is clearly unsafe.
         */
-       if (!IS_ERR(itn->fb_tunnel_dev))
+       if (!IS_ERR(itn->fb_tunnel_dev)) {
                itn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
+               ip_tunnel_add(itn, netdev_priv(itn->fb_tunnel_dev));
+       }
        rtnl_unlock();
 
        return PTR_RET(itn->fb_tunnel_dev);
@@ -884,8 +886,6 @@ static void ip_tunnel_destroy(struct ip_tunnel_net *itn, struct list_head *head,
                        if (!net_eq(dev_net(t->dev), net))
                                unregister_netdevice_queue(t->dev, head);
        }
-       if (itn->fb_tunnel_dev)
-               unregister_netdevice_queue(itn->fb_tunnel_dev, head);
 }
 
 void ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops)
index d6c856b17fd4ff22c0034af676a648730dffce82..c31e3ad98ef28e91eff2679d6976d4c20c055410 100644 (file)
@@ -61,7 +61,7 @@ int iptunnel_xmit(struct rtable *rt, struct sk_buff *skb,
        memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
 
        /* Push down and install the IP header. */
-       __skb_push(skb, sizeof(struct iphdr));
+       skb_push(skb, sizeof(struct iphdr));
        skb_reset_network_header(skb);
 
        iph = ip_hdr(skb);
index e805e7b3030e3dad2f8fd83d140f0bb7f100d69c..91f69bc883fe80cc97df0e34744caa6d40186bf8 100644 (file)
@@ -49,70 +49,6 @@ static struct rtnl_link_ops vti_link_ops __read_mostly;
 static int vti_net_id __read_mostly;
 static int vti_tunnel_init(struct net_device *dev);
 
-static int vti_err(struct sk_buff *skb, u32 info)
-{
-
-       /* All the routers (except for Linux) return only
-        * 8 bytes of packet payload. It means, that precise relaying of
-        * ICMP in the real Internet is absolutely infeasible.
-        */
-       struct net *net = dev_net(skb->dev);
-       struct ip_tunnel_net *itn = net_generic(net, vti_net_id);
-       struct iphdr *iph = (struct iphdr *)skb->data;
-       const int type = icmp_hdr(skb)->type;
-       const int code = icmp_hdr(skb)->code;
-       struct ip_tunnel *t;
-       int err;
-
-       switch (type) {
-       default:
-       case ICMP_PARAMETERPROB:
-               return 0;
-
-       case ICMP_DEST_UNREACH:
-               switch (code) {
-               case ICMP_SR_FAILED:
-               case ICMP_PORT_UNREACH:
-                       /* Impossible event. */
-                       return 0;
-               default:
-                       /* All others are translated to HOST_UNREACH. */
-                       break;
-               }
-               break;
-       case ICMP_TIME_EXCEEDED:
-               if (code != ICMP_EXC_TTL)
-                       return 0;
-               break;
-       }
-
-       err = -ENOENT;
-
-       t = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
-                            iph->daddr, iph->saddr, 0);
-       if (t == NULL)
-               goto out;
-
-       if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
-               ipv4_update_pmtu(skb, dev_net(skb->dev), info,
-                                t->parms.link, 0, IPPROTO_IPIP, 0);
-               err = 0;
-               goto out;
-       }
-
-       err = 0;
-       if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
-               goto out;
-
-       if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
-               t->err_count++;
-       else
-               t->err_count = 1;
-       t->err_time = jiffies;
-out:
-       return err;
-}
-
 /* We dont digest the packet therefore let the packet pass */
 static int vti_rcv(struct sk_buff *skb)
 {
@@ -296,9 +232,8 @@ static void __net_init vti_fb_tunnel_init(struct net_device *dev)
        iph->ihl                = 5;
 }
 
-static struct xfrm_tunnel vti_handler __read_mostly = {
+static struct xfrm_tunnel_notifier vti_handler __read_mostly = {
        .handler        =       vti_rcv,
-       .err_handler    =       vti_err,
        .priority       =       1,
 };
 
index 67e17dcda65e64f27b9ca5b244561ab2d7fc594f..b6346bf2fde3bc16f0655e9c9f7c6ade5ffeae8a 100644 (file)
@@ -267,7 +267,8 @@ synproxy_tg4(struct sk_buff *skb, const struct xt_action_param *par)
        if (th == NULL)
                return NF_DROP;
 
-       synproxy_parse_options(skb, par->thoff, th, &opts);
+       if (!synproxy_parse_options(skb, par->thoff, th, &opts))
+               return NF_DROP;
 
        if (th->syn && !(th->ack || th->fin || th->rst)) {
                /* Initial SYN from client */
@@ -350,7 +351,8 @@ static unsigned int ipv4_synproxy_hook(unsigned int hooknum,
 
                /* fall through */
        case TCP_CONNTRACK_SYN_SENT:
-               synproxy_parse_options(skb, thoff, th, &opts);
+               if (!synproxy_parse_options(skb, thoff, th, &opts))
+                       return NF_DROP;
 
                if (!th->syn && th->ack &&
                    CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) {
@@ -373,7 +375,9 @@ static unsigned int ipv4_synproxy_hook(unsigned int hooknum,
                if (!th->syn || !th->ack)
                        break;
 
-               synproxy_parse_options(skb, thoff, th, &opts);
+               if (!synproxy_parse_options(skb, thoff, th, &opts))
+                       return NF_DROP;
+
                if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP)
                        synproxy->tsoff = opts.tsval - synproxy->its;
 
index d7d9882d4caea169964a58e294ffe6c73a99d36c..a626104431527876187589e5e0be7aac1f5f3dc0 100644 (file)
@@ -237,11 +237,11 @@ static void inet_get_ping_group_range_net(struct net *net, kgid_t *low,
        unsigned int seq;
 
        do {
-               seq = read_seqbegin(&sysctl_local_ports.lock);
+               seq = read_seqbegin(&net->ipv4.sysctl_local_ports.lock);
 
                *low = data[0];
                *high = data[1];
-       } while (read_seqretry(&sysctl_local_ports.lock, seq));
+       } while (read_seqretry(&net->ipv4.sysctl_local_ports.lock, seq));
 }
 
 
@@ -713,6 +713,8 @@ int ping_v4_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        ipc.opt = NULL;
        ipc.oif = sk->sk_bound_dev_if;
        ipc.tx_flags = 0;
+       ipc.ttl = 0;
+       ipc.tos = -1;
 
        sock_tx_timestamp(sk, &ipc.tx_flags);
 
@@ -744,7 +746,7 @@ int ping_v4_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                        return -EINVAL;
                faddr = ipc.opt->opt.faddr;
        }
-       tos = RT_TOS(inet->tos);
+       tos = get_rttos(&ipc, inet);
        if (sock_flag(sk, SOCK_LOCALROUTE) ||
            (msg->msg_flags & MSG_DONTROUTE) ||
            (ipc.opt && ipc.opt->opt.is_strictroute)) {
index bfec521c717fd2320242c24e7a7f74a64c1c1a44..b2fa14c1a6f135ee58f1ef96ee018bcb4becf1ed 100644 (file)
@@ -218,8 +218,10 @@ static void raw_err(struct sock *sk, struct sk_buff *skb, u32 info)
 
        if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED)
                ipv4_sk_update_pmtu(skb, sk, info);
-       else if (type == ICMP_REDIRECT)
+       else if (type == ICMP_REDIRECT) {
                ipv4_sk_redirect(skb, sk);
+               return;
+       }
 
        /* Report error on raw socket, if:
           1. User requested ip_recverr.
@@ -517,6 +519,8 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        ipc.addr = inet->inet_saddr;
        ipc.opt = NULL;
        ipc.tx_flags = 0;
+       ipc.ttl = 0;
+       ipc.tos = -1;
        ipc.oif = sk->sk_bound_dev_if;
 
        if (msg->msg_controllen) {
@@ -556,7 +560,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                        daddr = ipc.opt->opt.faddr;
                }
        }
-       tos = RT_CONN_FLAGS(sk);
+       tos = get_rtconn_flags(&ipc, sk);
        if (msg->msg_flags & MSG_DONTROUTE)
                tos |= RTO_ONLINK;
 
index 540279f4c531be079e45c027aa11d37920c14a88..c08f096d46b5ec83f379e84e6a5bd32e0088c22f 100644 (file)
@@ -43,12 +43,12 @@ static int ip_ping_group_range_min[] = { 0, 0 };
 static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
 
 /* Update system visible IP port range */
-static void set_local_port_range(int range[2])
+static void set_local_port_range(struct net *net, int range[2])
 {
-       write_seqlock(&sysctl_local_ports.lock);
-       sysctl_local_ports.range[0] = range[0];
-       sysctl_local_ports.range[1] = range[1];
-       write_sequnlock(&sysctl_local_ports.lock);
+       write_seqlock(&net->ipv4.sysctl_local_ports.lock);
+       net->ipv4.sysctl_local_ports.range[0] = range[0];
+       net->ipv4.sysctl_local_ports.range[1] = range[1];
+       write_sequnlock(&net->ipv4.sysctl_local_ports.lock);
 }
 
 /* Validate changes from /proc interface. */
@@ -56,6 +56,8 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
                                 void __user *buffer,
                                 size_t *lenp, loff_t *ppos)
 {
+       struct net *net =
+               container_of(table->data, struct net, ipv4.sysctl_local_ports.range);
        int ret;
        int range[2];
        struct ctl_table tmp = {
@@ -66,14 +68,15 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
                .extra2 = &ip_local_port_range_max,
        };
 
-       inet_get_local_port_range(range, range + 1);
+       inet_get_local_port_range(net, &range[0], &range[1]);
+
        ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
 
        if (write && ret == 0) {
                if (range[1] < range[0])
                        ret = -EINVAL;
                else
-                       set_local_port_range(range);
+                       set_local_port_range(net, range);
        }
 
        return ret;
@@ -83,23 +86,27 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
 static void inet_get_ping_group_range_table(struct ctl_table *table, kgid_t *low, kgid_t *high)
 {
        kgid_t *data = table->data;
+       struct net *net =
+               container_of(table->data, struct net, ipv4.sysctl_ping_group_range);
        unsigned int seq;
        do {
-               seq = read_seqbegin(&sysctl_local_ports.lock);
+               seq = read_seqbegin(&net->ipv4.sysctl_local_ports.lock);
 
                *low = data[0];
                *high = data[1];
-       } while (read_seqretry(&sysctl_local_ports.lock, seq));
+       } while (read_seqretry(&net->ipv4.sysctl_local_ports.lock, seq));
 }
 
 /* Update system visible IP port range */
 static void set_ping_group_range(struct ctl_table *table, kgid_t low, kgid_t high)
 {
        kgid_t *data = table->data;
-       write_seqlock(&sysctl_local_ports.lock);
+       struct net *net =
+               container_of(table->data, struct net, ipv4.sysctl_ping_group_range);
+       write_seqlock(&net->ipv4.sysctl_local_ports.lock);
        data[0] = low;
        data[1] = high;
-       write_sequnlock(&sysctl_local_ports.lock);
+       write_sequnlock(&net->ipv4.sysctl_local_ports.lock);
 }
 
 /* Validate changes from /proc interface. */
@@ -474,13 +481,6 @@ static struct ctl_table ipv4_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
-       {
-               .procname       = "ip_local_port_range",
-               .data           = &sysctl_local_ports.range,
-               .maxlen         = sizeof(sysctl_local_ports.range),
-               .mode           = 0644,
-               .proc_handler   = ipv4_local_port_range,
-       },
        {
                .procname       = "ip_local_reserved_ports",
                .data           = NULL, /* initialized in sysctl_ipv4_init */
@@ -853,6 +853,13 @@ static struct ctl_table ipv4_net_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
+       {
+               .procname       = "ip_local_port_range",
+               .maxlen         = sizeof(init_net.ipv4.sysctl_local_ports.range),
+               .data           = &init_net.ipv4.sysctl_local_ports.range,
+               .mode           = 0644,
+               .proc_handler   = ipv4_local_port_range,
+       },
        {
                .procname       = "tcp_mem",
                .maxlen         = sizeof(init_net.ipv4.sysctl_tcp_mem),
@@ -888,6 +895,8 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
                        &net->ipv4.sysctl_ping_group_range;
                table[7].data =
                        &net->ipv4.sysctl_tcp_ecn;
+               table[8].data =
+                       &net->ipv4.sysctl_local_ports.range;
 
                /* Don't export sysctls to unprivileged users */
                if (net->user_ns != &init_user_ns)
@@ -901,6 +910,13 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
        net->ipv4.sysctl_ping_group_range[0] = make_kgid(&init_user_ns, 1);
        net->ipv4.sysctl_ping_group_range[1] = make_kgid(&init_user_ns, 0);
 
+       /*
+        * Set defaults for local port range
+        */
+       seqlock_init(&net->ipv4.sysctl_local_ports.lock);
+       net->ipv4.sysctl_local_ports.range[0] =  32768;
+       net->ipv4.sysctl_local_ports.range[1] =  61000;
+
        tcp_init_mem(net);
 
        net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
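With the port range now per network namespace, readers pair with set_local_port_range() through the same per-net seqlock. A rough sketch of the matching reader, consistent with the inet_get_local_port_range(net, &low, &high) callers elsewhere in this series (illustrative; the real helper presumably lives in the inet connection code):

void inet_get_local_port_range(struct net *net, int *low, int *high)
{
	unsigned int seq;

	do {
		/* Retry if a concurrent sysctl write touched the range. */
		seq = read_seqbegin(&net->ipv4.sysctl_local_ports.lock);

		*low = net->ipv4.sysctl_local_ports.range[0];
		*high = net->ipv4.sysctl_local_ports.range[1];
	} while (read_seqretry(&net->ipv4.sysctl_local_ports.lock, seq));
}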
index 5d083855c111957fc5e7362ef0e1c79721103ff7..fa6cf1f91ff86023647c58bd0261fa136b6a13ca 100644 (file)
@@ -267,11 +267,31 @@ static bool TCP_ECN_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr
  * 1. Tuning sk->sk_sndbuf, when connection enters established state.
  */
 
-static void tcp_fixup_sndbuf(struct sock *sk)
+static void tcp_sndbuf_expand(struct sock *sk)
 {
-       int sndmem = SKB_TRUESIZE(tcp_sk(sk)->rx_opt.mss_clamp + MAX_TCP_HEADER);
+       const struct tcp_sock *tp = tcp_sk(sk);
+       int sndmem, per_mss;
+       u32 nr_segs;
+
+       /* Worst case is non GSO/TSO : each frame consumes one skb
+        * and skb->head is kmalloced using power of two area of memory
+        */
+       per_mss = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) +
+                 MAX_TCP_HEADER +
+                 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+       per_mss = roundup_pow_of_two(per_mss) +
+                 SKB_DATA_ALIGN(sizeof(struct sk_buff));
+
+       nr_segs = max_t(u32, TCP_INIT_CWND, tp->snd_cwnd);
+       nr_segs = max_t(u32, nr_segs, tp->reordering + 1);
+
+       /* Fast Recovery (RFC 5681 3.2) :
+        * Cubic needs 1.7 factor, rounded to 2 to include
+        * extra cushion (application might react slowly to POLLOUT)
+        */
+       sndmem = 2 * nr_segs * per_mss;
 
-       sndmem *= TCP_INIT_CWND;
        if (sk->sk_sndbuf < sndmem)
                sk->sk_sndbuf = min(sndmem, sysctl_tcp_wmem[2]);
 }
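As a rough worked example (all numbers approximate and configuration dependent): with an MSS near 1460 bytes, per_mss is about 1460 + MAX_TCP_HEADER (~160) + the aligned skb_shared_info (~320) ≈ 1940 bytes, which rounds up to 2048 and, after adding the aligned sizeof(struct sk_buff) (~256), lands near 2.3 KB per segment. With defaults, nr_segs = max(TCP_INIT_CWND = 10, snd_cwnd, reordering + 1) = 10, so sndmem ≈ 2 * 10 * 2.3 KB ≈ 46 KB, instead of the old fixed SKB_TRUESIZE(mss_clamp + MAX_TCP_HEADER) * TCP_INIT_CWND estimate.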
@@ -376,7 +396,7 @@ void tcp_init_buffer_space(struct sock *sk)
        if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK))
                tcp_fixup_rcvbuf(sk);
        if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK))
-               tcp_fixup_sndbuf(sk);
+               tcp_sndbuf_expand(sk);
 
        tp->rcvq_space.space = tp->rcv_wnd;
        tp->rcvq_space.time = tcp_time_stamp;
@@ -735,7 +755,7 @@ static void tcp_update_pacing_rate(struct sock *sk)
        if (tp->srtt > 8 + 2)
                do_div(rate, tp->srtt);
 
-       sk->sk_pacing_rate = min_t(u64, rate, ~0U);
+       sk->sk_pacing_rate = min_t(u64, rate, sk->sk_max_pacing_rate);
 }
 
 /* Calculate rto without backoff.  This is the second half of Van Jacobson's
@@ -2992,7 +3012,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct sk_buff *skb;
        u32 now = tcp_time_stamp;
-       int fully_acked = true;
+       bool fully_acked = true;
        int flag = 0;
        u32 pkts_acked = 0;
        u32 reord = tp->packets_out;
@@ -4723,15 +4743,7 @@ static void tcp_new_space(struct sock *sk)
        struct tcp_sock *tp = tcp_sk(sk);
 
        if (tcp_should_expand_sndbuf(sk)) {
-               int sndmem = SKB_TRUESIZE(max_t(u32,
-                                               tp->rx_opt.mss_clamp,
-                                               tp->mss_cache) +
-                                         MAX_TCP_HEADER);
-               int demanded = max_t(unsigned int, tp->snd_cwnd,
-                                    tp->reordering + 1);
-               sndmem *= 2 * demanded;
-               if (sndmem > sk->sk_sndbuf)
-                       sk->sk_sndbuf = min(sndmem, sysctl_tcp_wmem[2]);
+               tcp_sndbuf_expand(sk);
                tp->snd_cwnd_stamp = tcp_time_stamp;
        }
 
index b14266bb91eb5e3b1f43f3329dc2510c8be26a19..5d6b1a609da858cd29386c573ecb4169f04e5bc7 100644 (file)
@@ -1410,8 +1410,8 @@ static int tcp_v4_conn_req_fastopen(struct sock *sk,
        inet_csk(child)->icsk_af_ops->rebuild_header(child);
        tcp_init_congestion_control(child);
        tcp_mtup_init(child);
-       tcp_init_buffer_space(child);
        tcp_init_metrics(child);
+       tcp_init_buffer_space(child);
 
        /* Queue the data carried in the SYN packet. We need to first
         * bump skb's refcnt because the caller will attempt to free it.
index 7c83cb8bf1378022ba3a68c56403ef40801cd3ae..e6bb8256e59f3738280a022f250f6fefd621cb38 100644 (file)
@@ -895,8 +895,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 
        skb_orphan(skb);
        skb->sk = sk;
-       skb->destructor = (sysctl_tcp_limit_output_bytes > 0) ?
-                         tcp_wfree : sock_wfree;
+       skb->destructor = tcp_wfree;
        atomic_add(skb->truesize, &sk->sk_wmem_alloc);
 
        /* Build TCP header and checksum it. */
@@ -1840,7 +1839,6 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
        while ((skb = tcp_send_head(sk))) {
                unsigned int limit;
 
-
                tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
                BUG_ON(!tso_segs);
 
@@ -1869,13 +1867,20 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
                                break;
                }
 
-               /* TSQ : sk_wmem_alloc accounts skb truesize,
-                * including skb overhead. But thats OK.
+               /* TCP Small Queues :
+                * Control the number of packets in qdisc/devices to two packets, or ~1 ms of data.
+                * This allows for :
+                *  - better RTT estimation and ACK scheduling
+                *  - faster recovery
+                *  - high rates
                 */
-               if (atomic_read(&sk->sk_wmem_alloc) >= sysctl_tcp_limit_output_bytes) {
+               limit = max(skb->truesize, sk->sk_pacing_rate >> 10);
+
+               if (atomic_read(&sk->sk_wmem_alloc) > limit) {
                        set_bit(TSQ_THROTTLED, &tp->tsq_flags);
                        break;
                }
+
                limit = mss_now;
                if (tso_segs > 1 && !tcp_urg_mode(tp))
                        limit = tcp_mss_split_point(sk, skb, mss_now,
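The new limit is roughly one millisecond of data at the current pacing rate (sk_pacing_rate is in bytes per second, so the >> 10 divides by 1024), floored at one skb. For instance, at a pacing rate of ~125 MB/s (about 1 Gbit/s) the socket may keep ~122 KB in the qdisc/device queues, while at ~1.25 MB/s (about 10 Mbit/s) the computed cap (~1.2 KB) is below a full-sized skb's truesize, so the skb->truesize floor applies.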
index 74d2c95db57f3768d62ee450ed6bab64ca09b26d..c41833e9c0831efdd2a8125387e361f636531731 100644 (file)
@@ -219,7 +219,7 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
                unsigned short first, last;
                DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN);
 
-               inet_get_local_port_range(&low, &high);
+               inet_get_local_port_range(net, &low, &high);
                remaining = (high - low) + 1;
 
                rand = net_random();
@@ -658,7 +658,7 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
                break;
        case ICMP_REDIRECT:
                ipv4_sk_redirect(skb, sk);
-               break;
+               goto out;
        }
 
        /*
@@ -855,6 +855,8 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 
        ipc.opt = NULL;
        ipc.tx_flags = 0;
+       ipc.ttl = 0;
+       ipc.tos = -1;
 
        getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
 
@@ -938,7 +940,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                faddr = ipc.opt->opt.faddr;
                connected = 0;
        }
-       tos = RT_TOS(inet->tos);
+       tos = get_rttos(&ipc, inet);
        if (sock_flag(sk, SOCK_LOCALROUTE) ||
            (msg->msg_flags & MSG_DONTROUTE) ||
            (ipc.opt && ipc.opt->opt.is_strictroute)) {
index b5663c37f089ed0afe33115bcbfad2555b8d0f48..31b18152528fe4dbf9e500ae0c9a2a1a5a3a2adf 100644 (file)
 #include <net/xfrm.h>
 
 /* Informational hook. The decap is still done here. */
-static struct xfrm_tunnel __rcu *rcv_notify_handlers __read_mostly;
+static struct xfrm_tunnel_notifier __rcu *rcv_notify_handlers __read_mostly;
 static DEFINE_MUTEX(xfrm4_mode_tunnel_input_mutex);
 
-int xfrm4_mode_tunnel_input_register(struct xfrm_tunnel *handler)
+int xfrm4_mode_tunnel_input_register(struct xfrm_tunnel_notifier *handler)
 {
-       struct xfrm_tunnel __rcu **pprev;
-       struct xfrm_tunnel *t;
+       struct xfrm_tunnel_notifier __rcu **pprev;
+       struct xfrm_tunnel_notifier *t;
        int ret = -EEXIST;
        int priority = handler->priority;
 
@@ -50,10 +50,10 @@ err:
 }
 EXPORT_SYMBOL_GPL(xfrm4_mode_tunnel_input_register);
 
-int xfrm4_mode_tunnel_input_deregister(struct xfrm_tunnel *handler)
+int xfrm4_mode_tunnel_input_deregister(struct xfrm_tunnel_notifier *handler)
 {
-       struct xfrm_tunnel __rcu **pprev;
-       struct xfrm_tunnel *t;
+       struct xfrm_tunnel_notifier __rcu **pprev;
+       struct xfrm_tunnel_notifier *t;
        int ret = -ENOENT;
 
        mutex_lock(&xfrm4_mode_tunnel_input_mutex);
@@ -134,7 +134,7 @@ static int xfrm4_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
 
 static int xfrm4_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
 {
-       struct xfrm_tunnel *handler;
+       struct xfrm_tunnel_notifier *handler;
        int err = -EINVAL;
 
        if (XFRM_MODE_SKB_CB(skb)->protocol != IPPROTO_IPIP)
index d6ff12617f36f9eabbf7c9744b3b8121a567c1bf..cd3fb301da38a970cd48302386428a8923f21c91 100644 (file)
@@ -1499,6 +1499,33 @@ static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
        return false;
 }
 
+/* Compares an address/prefix_len with addresses on device @dev.
+ * If one is found it returns true.
+ */
+bool ipv6_chk_custom_prefix(const struct in6_addr *addr,
+       const unsigned int prefix_len, struct net_device *dev)
+{
+       struct inet6_dev *idev;
+       struct inet6_ifaddr *ifa;
+       bool ret = false;
+
+       rcu_read_lock();
+       idev = __in6_dev_get(dev);
+       if (idev) {
+               read_lock_bh(&idev->lock);
+               list_for_each_entry(ifa, &idev->addr_list, if_list) {
+                       ret = ipv6_prefix_equal(addr, &ifa->addr, prefix_len);
+                       if (ret)
+                               break;
+               }
+               read_unlock_bh(&idev->lock);
+       }
+       rcu_read_unlock();
+
+       return ret;
+}
+EXPORT_SYMBOL(ipv6_chk_custom_prefix);
+
 int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev)
 {
        struct inet6_dev *idev;
@@ -2193,43 +2220,21 @@ ok:
                        else
                                stored_lft = 0;
                        if (!update_lft && !create && stored_lft) {
-                               if (valid_lft > MIN_VALID_LIFETIME ||
-                                   valid_lft > stored_lft)
-                                       update_lft = 1;
-                               else if (stored_lft <= MIN_VALID_LIFETIME) {
-                                       /* valid_lft <= stored_lft is always true */
-                                       /*
-                                        * RFC 4862 Section 5.5.3e:
-                                        * "Note that the preferred lifetime of
-                                        *  the corresponding address is always
-                                        *  reset to the Preferred Lifetime in
-                                        *  the received Prefix Information
-                                        *  option, regardless of whether the
-                                        *  valid lifetime is also reset or
-                                        *  ignored."
-                                        *
-                                        *  So if the preferred lifetime in
-                                        *  this advertisement is different
-                                        *  than what we have stored, but the
-                                        *  valid lifetime is invalid, just
-                                        *  reset prefered_lft.
-                                        *
-                                        *  We must set the valid lifetime
-                                        *  to the stored lifetime since we'll
-                                        *  be updating the timestamp below,
-                                        *  else we'll set it back to the
-                                        *  minimum.
-                                        */
-                                       if (prefered_lft != ifp->prefered_lft) {
-                                               valid_lft = stored_lft;
-                                               update_lft = 1;
-                                       }
-                               } else {
-                                       valid_lft = MIN_VALID_LIFETIME;
-                                       if (valid_lft < prefered_lft)
-                                               prefered_lft = valid_lft;
-                                       update_lft = 1;
-                               }
+                               const u32 minimum_lft = min(
+                                       stored_lft, (u32)MIN_VALID_LIFETIME);
+                               valid_lft = max(valid_lft, minimum_lft);
+
+                               /* RFC4862 Section 5.5.3e:
+                                * "Note that the preferred lifetime of the
+                                *  corresponding address is always reset to
+                                *  the Preferred Lifetime in the received
+                                *  Prefix Information option, regardless of
+                                *  whether the valid lifetime is also reset or
+                                *  ignored."
+                                *
+                                * So we should always update prefered_lft here.
+                                */
+                               update_lft = 1;
                        }
 
                        if (update_lft) {
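The net effect is a clamp from below: valid_lft = max(advertised valid_lft, min(stored_lft, MIN_VALID_LIFETIME)). As a worked example with MIN_VALID_LIFETIME of two hours (7200 s), a stored lifetime of 36000 s and an RA advertising valid_lft = 60 s gives minimum_lft = min(36000, 7200) = 7200, so valid_lft becomes max(60, 7200) = 7200 s, while the preferred lifetime is still taken from the RA as RFC 4862 5.5.3e requires, and update_lft ensures the timestamps are refreshed.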
index 5bec666aba61d464fab4e77684eedd4265143cf9..5550a8113a6dc5f202c0ccda2c7166e98e81a7e9 100644 (file)
@@ -1529,25 +1529,6 @@ static void fib6_clean_tree(struct net *net, struct fib6_node *root,
        fib6_walk(&c.w);
 }
 
-void fib6_clean_all_ro(struct net *net, int (*func)(struct rt6_info *, void *arg),
-                   int prune, void *arg)
-{
-       struct fib6_table *table;
-       struct hlist_head *head;
-       unsigned int h;
-
-       rcu_read_lock();
-       for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
-               head = &net->ipv6.fib_table_hash[h];
-               hlist_for_each_entry_rcu(table, head, tb6_hlist) {
-                       read_lock_bh(&table->tb6_lock);
-                       fib6_clean_tree(net, &table->tb6_root,
-                                       func, prune, arg);
-                       read_unlock_bh(&table->tb6_lock);
-               }
-       }
-       rcu_read_unlock();
-}
 void fib6_clean_all(struct net *net, int (*func)(struct rt6_info *, void *arg),
                    int prune, void *arg)
 {
@@ -1782,3 +1763,189 @@ void fib6_gc_cleanup(void)
        unregister_pernet_subsys(&fib6_net_ops);
        kmem_cache_destroy(fib6_node_kmem);
 }
+
+#ifdef CONFIG_PROC_FS
+
+struct ipv6_route_iter {
+       struct seq_net_private p;
+       struct fib6_walker_t w;
+       loff_t skip;
+       struct fib6_table *tbl;
+       __u32 sernum;
+};
+
+static int ipv6_route_seq_show(struct seq_file *seq, void *v)
+{
+       struct rt6_info *rt = v;
+       struct ipv6_route_iter *iter = seq->private;
+
+       seq_printf(seq, "%pi6 %02x ", &rt->rt6i_dst.addr, rt->rt6i_dst.plen);
+
+#ifdef CONFIG_IPV6_SUBTREES
+       seq_printf(seq, "%pi6 %02x ", &rt->rt6i_src.addr, rt->rt6i_src.plen);
+#else
+       seq_puts(seq, "00000000000000000000000000000000 00 ");
+#endif
+       if (rt->rt6i_flags & RTF_GATEWAY)
+               seq_printf(seq, "%pi6", &rt->rt6i_gateway);
+       else
+               seq_puts(seq, "00000000000000000000000000000000");
+
+       seq_printf(seq, " %08x %08x %08x %08x %8s\n",
+                  rt->rt6i_metric, atomic_read(&rt->dst.__refcnt),
+                  rt->dst.__use, rt->rt6i_flags,
+                  rt->dst.dev ? rt->dst.dev->name : "");
+       iter->w.leaf = NULL;
+       return 0;
+}
+
+static int ipv6_route_yield(struct fib6_walker_t *w)
+{
+       struct ipv6_route_iter *iter = w->args;
+
+       if (!iter->skip)
+               return 1;
+
+       do {
+               iter->w.leaf = iter->w.leaf->dst.rt6_next;
+               iter->skip--;
+               if (!iter->skip && iter->w.leaf)
+                       return 1;
+       } while (iter->w.leaf);
+
+       return 0;
+}
+
+static void ipv6_route_seq_setup_walk(struct ipv6_route_iter *iter)
+{
+       memset(&iter->w, 0, sizeof(iter->w));
+       iter->w.func = ipv6_route_yield;
+       iter->w.root = &iter->tbl->tb6_root;
+       iter->w.state = FWS_INIT;
+       iter->w.node = iter->w.root;
+       iter->w.args = iter;
+       iter->sernum = iter->w.root->fn_sernum;
+       INIT_LIST_HEAD(&iter->w.lh);
+       fib6_walker_link(&iter->w);
+}
+
+static struct fib6_table *ipv6_route_seq_next_table(struct fib6_table *tbl,
+                                                   struct net *net)
+{
+       unsigned int h;
+       struct hlist_node *node;
+
+       if (tbl) {
+               h = (tbl->tb6_id & (FIB6_TABLE_HASHSZ - 1)) + 1;
+               node = rcu_dereference_bh(hlist_next_rcu(&tbl->tb6_hlist));
+       } else {
+               h = 0;
+               node = NULL;
+       }
+
+       while (!node && h < FIB6_TABLE_HASHSZ) {
+               node = rcu_dereference_bh(
+                       hlist_first_rcu(&net->ipv6.fib_table_hash[h++]));
+       }
+       return hlist_entry_safe(node, struct fib6_table, tb6_hlist);
+}
+
+static void ipv6_route_check_sernum(struct ipv6_route_iter *iter)
+{
+       if (iter->sernum != iter->w.root->fn_sernum) {
+               iter->sernum = iter->w.root->fn_sernum;
+               iter->w.state = FWS_INIT;
+               iter->w.node = iter->w.root;
+               WARN_ON(iter->w.skip);
+               iter->w.skip = iter->w.count;
+       }
+}
+
+static void *ipv6_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+       int r;
+       struct rt6_info *n;
+       struct net *net = seq_file_net(seq);
+       struct ipv6_route_iter *iter = seq->private;
+
+       if (!v)
+               goto iter_table;
+
+       n = ((struct rt6_info *)v)->dst.rt6_next;
+       if (n) {
+               ++*pos;
+               return n;
+       }
+
+iter_table:
+       ipv6_route_check_sernum(iter);
+       read_lock(&iter->tbl->tb6_lock);
+       r = fib6_walk_continue(&iter->w);
+       read_unlock(&iter->tbl->tb6_lock);
+       if (r > 0) {
+               if (v)
+                       ++*pos;
+               return iter->w.leaf;
+       } else if (r < 0) {
+               fib6_walker_unlink(&iter->w);
+               return NULL;
+       }
+       fib6_walker_unlink(&iter->w);
+
+       iter->tbl = ipv6_route_seq_next_table(iter->tbl, net);
+       if (!iter->tbl)
+               return NULL;
+
+       ipv6_route_seq_setup_walk(iter);
+       goto iter_table;
+}
+
+static void *ipv6_route_seq_start(struct seq_file *seq, loff_t *pos)
+       __acquires(RCU_BH)
+{
+       struct net *net = seq_file_net(seq);
+       struct ipv6_route_iter *iter = seq->private;
+
+       rcu_read_lock_bh();
+       iter->tbl = ipv6_route_seq_next_table(NULL, net);
+       iter->skip = *pos;
+
+       if (iter->tbl) {
+               ipv6_route_seq_setup_walk(iter);
+               return ipv6_route_seq_next(seq, NULL, pos);
+       } else {
+               return NULL;
+       }
+}
+
+static bool ipv6_route_iter_active(struct ipv6_route_iter *iter)
+{
+       struct fib6_walker_t *w = &iter->w;
+       return w->node && !(w->state == FWS_U && w->node == w->root);
+}
+
+static void ipv6_route_seq_stop(struct seq_file *seq, void *v)
+       __releases(RCU_BH)
+{
+       struct ipv6_route_iter *iter = seq->private;
+
+       if (ipv6_route_iter_active(iter))
+               fib6_walker_unlink(&iter->w);
+
+       rcu_read_unlock_bh();
+}
+
+static const struct seq_operations ipv6_route_seq_ops = {
+       .start  = ipv6_route_seq_start,
+       .next   = ipv6_route_seq_next,
+       .stop   = ipv6_route_seq_stop,
+       .show   = ipv6_route_seq_show
+};
+
+int ipv6_route_open(struct inode *inode, struct file *file)
+{
+       return seq_open_net(inode, file, &ipv6_route_seq_ops,
+                           sizeof(struct ipv6_route_iter));
+}
+
+#endif /* CONFIG_PROC_FS */
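Each line emitted by ipv6_route_seq_show() keeps the established /proc/net/ipv6_route layout: destination and prefix length, source (or zeros), gateway (or zeros), then metric, reference count, use count, flags and device name, all in hex. An illustrative (made-up) line for fe80::/64 on eth0 would look roughly like:

fe800000000000000000000000000000 40 00000000000000000000000000000000 00 00000000000000000000000000000000 00000100 00000001 00000000 00000001     eth0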
index 6b26e9feafb98eb8d269f553cbf8339a3341526b..7bb5446b9d73c16a7f4096f2705ce60f997c7c2a 100644 (file)
@@ -618,7 +618,7 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
        struct ip6_tnl *tunnel = netdev_priv(dev);
        struct net_device *tdev;    /* Device to other host */
        struct ipv6hdr  *ipv6h;     /* Our new IP header */
-       unsigned int max_headroom;      /* The extra header space needed */
+       unsigned int max_headroom = 0; /* The extra header space needed */
        int    gre_hlen;
        struct ipv6_tel_txoption opt;
        int    mtu;
@@ -693,7 +693,7 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
 
        skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(dev)));
 
-       max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen + dst->header_len;
+       max_headroom += LL_RESERVED_SPACE(tdev) + gre_hlen + dst->header_len;
 
        if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
            (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
index 3a692d5291636571266c6e26293bc7054228f4e3..a54c45ce4a48f0d3a65f6c54ac77bb73a6a41280 100644 (file)
@@ -1015,6 +1015,8 @@ static inline int ip6_ufo_append_data(struct sock *sk,
         * udp datagram
         */
        if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
+               struct frag_hdr fhdr;
+
                skb = sock_alloc_send_skb(sk,
                        hh_len + fragheaderlen + transhdrlen + 20,
                        (flags & MSG_DONTWAIT), &err);
@@ -1036,12 +1038,6 @@ static inline int ip6_ufo_append_data(struct sock *sk,
                skb->protocol = htons(ETH_P_IPV6);
                skb->ip_summed = CHECKSUM_PARTIAL;
                skb->csum = 0;
-       }
-
-       err = skb_append_datato_frags(sk,skb, getfrag, from,
-                                     (length - transhdrlen));
-       if (!err) {
-               struct frag_hdr fhdr;
 
                /* Specify the length of each IPv6 datagram fragment.
                 * It has to be a multiple of 8.
@@ -1052,15 +1048,10 @@ static inline int ip6_ufo_append_data(struct sock *sk,
                ipv6_select_ident(&fhdr, rt);
                skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
                __skb_queue_tail(&sk->sk_write_queue, skb);
-
-               return 0;
        }
-       /* There is not enough support do UPD LSO,
-        * so follow normal path
-        */
-       kfree_skb(skb);
 
-       return err;
+       return skb_append_datato_frags(sk, skb, getfrag, from,
+                                      (length - transhdrlen));
 }
 
 static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src,
@@ -1227,27 +1218,27 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
         * --yoshfuji
         */
 
-       cork->length += length;
-       if (length > mtu) {
-               int proto = sk->sk_protocol;
-               if (dontfrag && (proto == IPPROTO_UDP || proto == IPPROTO_RAW)){
-                       ipv6_local_rxpmtu(sk, fl6, mtu-exthdrlen);
-                       return -EMSGSIZE;
-               }
-
-               if (proto == IPPROTO_UDP &&
-                   (rt->dst.dev->features & NETIF_F_UFO)) {
+       if ((length > mtu) && dontfrag && (sk->sk_protocol == IPPROTO_UDP ||
+                                          sk->sk_protocol == IPPROTO_RAW)) {
+               ipv6_local_rxpmtu(sk, fl6, mtu-exthdrlen);
+               return -EMSGSIZE;
+       }
 
-                       err = ip6_ufo_append_data(sk, getfrag, from, length,
-                                                 hh_len, fragheaderlen,
-                                                 transhdrlen, mtu, flags, rt);
-                       if (err)
-                               goto error;
-                       return 0;
-               }
+       skb = skb_peek_tail(&sk->sk_write_queue);
+       cork->length += length;
+       if (((length > mtu) ||
+            (skb && skb_is_gso(skb))) &&
+           (sk->sk_protocol == IPPROTO_UDP) &&
+           (rt->dst.dev->features & NETIF_F_UFO)) {
+               err = ip6_ufo_append_data(sk, getfrag, from, length,
+                                         hh_len, fragheaderlen,
+                                         transhdrlen, mtu, flags, rt);
+               if (err)
+                       goto error;
+               return 0;
        }
 
-       if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
+       if (!skb)
                goto alloc_new_skb;
 
        while (length > 0) {
index 2d8f4829575b2d410ce74014287b97361e2abf38..a791552e042212d866b2cd96c35e2f0b5d289ab5 100644 (file)
@@ -1731,8 +1731,6 @@ static void __net_exit ip6_tnl_destroy_tunnels(struct ip6_tnl_net *ip6n)
                }
        }
 
-       t = rtnl_dereference(ip6n->tnls_wc[0]);
-       unregister_netdevice_queue(t->dev, &list);
        unregister_netdevice_many(&list);
 }
 
@@ -1752,6 +1750,7 @@ static int __net_init ip6_tnl_init_net(struct net *net)
        if (!ip6n->fb_tnl_dev)
                goto err_alloc_dev;
        dev_net_set(ip6n->fb_tnl_dev, net);
+       ip6n->fb_tnl_dev->rtnl_link_ops = &ip6_link_ops;
        /* FB netdevice is special: we have one, and only one per netns.
         * Allowing to move it to another netns is clearly unsafe.
         */
index 096cd67b737c4b4eba2d42470d7a58e2e229153c..d18f9f903db62333983d3fad0e1ccb9298762c98 100644 (file)
@@ -2034,7 +2034,7 @@ static void mld_dad_timer_expire(unsigned long data)
                if (idev->mc_dad_count)
                        mld_dad_start_timer(idev, idev->mc_maxdelay);
        }
-       __in6_dev_put(idev);
+       in6_dev_put(idev);
 }
 
 static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode,
@@ -2379,7 +2379,7 @@ static void mld_gq_timer_expire(unsigned long data)
 
        idev->mc_gq_running = 0;
        mld_send_report(idev, NULL);
-       __in6_dev_put(idev);
+       in6_dev_put(idev);
 }
 
 static void mld_ifc_timer_expire(unsigned long data)
@@ -2392,7 +2392,7 @@ static void mld_ifc_timer_expire(unsigned long data)
                if (idev->mc_ifc_count)
                        mld_ifc_start_timer(idev, idev->mc_maxdelay);
        }
-       __in6_dev_put(idev);
+       in6_dev_put(idev);
 }
 
 static void mld_ifc_event(struct inet6_dev *idev)
index 19cfea8dbcaa0547c85e40f4217105ef46c7b683..2748b042da72eceb4002cf4183a781282c5e84d4 100644 (file)
@@ -282,7 +282,8 @@ synproxy_tg6(struct sk_buff *skb, const struct xt_action_param *par)
        if (th == NULL)
                return NF_DROP;
 
-       synproxy_parse_options(skb, par->thoff, th, &opts);
+       if (!synproxy_parse_options(skb, par->thoff, th, &opts))
+               return NF_DROP;
 
        if (th->syn && !(th->ack || th->fin || th->rst)) {
                /* Initial SYN from client */
@@ -372,7 +373,8 @@ static unsigned int ipv6_synproxy_hook(unsigned int hooknum,
 
                /* fall through */
        case TCP_CONNTRACK_SYN_SENT:
-               synproxy_parse_options(skb, thoff, th, &opts);
+               if (!synproxy_parse_options(skb, thoff, th, &opts))
+                       return NF_DROP;
 
                if (!th->syn && th->ack &&
                    CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) {
@@ -395,7 +397,9 @@ static unsigned int ipv6_synproxy_hook(unsigned int hooknum,
                if (!th->syn || !th->ack)
                        break;
 
-               synproxy_parse_options(skb, thoff, th, &opts);
+               if (!synproxy_parse_options(skb, thoff, th, &opts))
+                       return NF_DROP;
+
                if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP)
                        synproxy->tsoff = opts.tsval - synproxy->its;
 
index 58916bbb17284ed441611d0210c9514141ff9bf4..a4ed2416399ed52622b9c60460d411b9507e6d85 100644 (file)
@@ -335,8 +335,10 @@ static void rawv6_err(struct sock *sk, struct sk_buff *skb,
                ip6_sk_update_pmtu(skb, sk, info);
                harderr = (np->pmtudisc == IPV6_PMTUDISC_DO);
        }
-       if (type == NDISC_REDIRECT)
+       if (type == NDISC_REDIRECT) {
                ip6_sk_redirect(skb, sk);
+               return;
+       }
        if (np->recverr) {
                u8 *payload = skb->data;
                if (!inet->hdrincl)
index c979dd96d82a838534ae0105dbfb9e665ad410e6..c3130ffc3bca6af51e8126fcf4541a52a20b2a2a 100644 (file)
@@ -1137,7 +1137,6 @@ void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
        memset(&fl6, 0, sizeof(fl6));
        fl6.flowi6_oif = oif;
        fl6.flowi6_mark = mark;
-       fl6.flowi6_flags = 0;
        fl6.daddr = iph->daddr;
        fl6.saddr = iph->saddr;
        fl6.flowlabel = ip6_flowinfo(iph);
@@ -1236,7 +1235,6 @@ void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark)
        memset(&fl6, 0, sizeof(fl6));
        fl6.flowi6_oif = oif;
        fl6.flowi6_mark = mark;
-       fl6.flowi6_flags = 0;
        fl6.daddr = iph->daddr;
        fl6.saddr = iph->saddr;
        fl6.flowlabel = ip6_flowinfo(iph);
@@ -1258,7 +1256,6 @@ void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
        memset(&fl6, 0, sizeof(fl6));
        fl6.flowi6_oif = oif;
        fl6.flowi6_mark = mark;
-       fl6.flowi6_flags = 0;
        fl6.daddr = msg->dest;
        fl6.saddr = iph->daddr;
 
@@ -2800,56 +2797,12 @@ static int ip6_route_dev_notify(struct notifier_block *this,
 
 #ifdef CONFIG_PROC_FS
 
-struct rt6_proc_arg
-{
-       char *buffer;
-       int offset;
-       int length;
-       int skip;
-       int len;
-};
-
-static int rt6_info_route(struct rt6_info *rt, void *p_arg)
-{
-       struct seq_file *m = p_arg;
-
-       seq_printf(m, "%pi6 %02x ", &rt->rt6i_dst.addr, rt->rt6i_dst.plen);
-
-#ifdef CONFIG_IPV6_SUBTREES
-       seq_printf(m, "%pi6 %02x ", &rt->rt6i_src.addr, rt->rt6i_src.plen);
-#else
-       seq_puts(m, "00000000000000000000000000000000 00 ");
-#endif
-       if (rt->rt6i_flags & RTF_GATEWAY) {
-               seq_printf(m, "%pi6", &rt->rt6i_gateway);
-       } else {
-               seq_puts(m, "00000000000000000000000000000000");
-       }
-       seq_printf(m, " %08x %08x %08x %08x %8s\n",
-                  rt->rt6i_metric, atomic_read(&rt->dst.__refcnt),
-                  rt->dst.__use, rt->rt6i_flags,
-                  rt->dst.dev ? rt->dst.dev->name : "");
-       return 0;
-}
-
-static int ipv6_route_show(struct seq_file *m, void *v)
-{
-       struct net *net = (struct net *)m->private;
-       fib6_clean_all_ro(net, rt6_info_route, 0, m);
-       return 0;
-}
-
-static int ipv6_route_open(struct inode *inode, struct file *file)
-{
-       return single_open_net(inode, file, ipv6_route_show);
-}
-
 static const struct file_operations ipv6_route_proc_fops = {
        .owner          = THIS_MODULE,
        .open           = ipv6_route_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
-       .release        = single_release_net,
+       .release        = seq_release_net,
 };
 
 static int rt6_stats_seq_show(struct seq_file *seq, void *v)
index 7ee5cb96db348ab5c75946e77d3068c3c447ba17..19269453a8eaca1d34eefa648e401ccea7d829ac 100644 (file)
@@ -566,6 +566,70 @@ static inline bool is_spoofed_6rd(struct ip_tunnel *tunnel, const __be32 v4addr,
        return false;
 }
 
+/* Checks if an address matches an address on the tunnel interface.
+ * Used to detect the NAT of proto 41 packets and let them pass the spoofing test.
+ * Long story:
+ * This function is called after we considered the packet as spoofed
+ * in is_spoofed_6rd.
+ * We may have a router that is doing NAT for proto 41 packets
+ * for an internal station. Destination a.a.a.a/PREFIX:bbbb:bbbb
+ * will be translated to n.n.n.n/PREFIX:bbbb:bbbb, and the
+ * is_spoofed_6rd function will return true, dropping the packet.
+ * But we can still check whether it is spoofed against the IP
+ * addresses associated with the interface.
+ */
+static bool only_dnatted(const struct ip_tunnel *tunnel,
+       const struct in6_addr *v6dst)
+{
+       int prefix_len;
+
+#ifdef CONFIG_IPV6_SIT_6RD
+       prefix_len = tunnel->ip6rd.prefixlen + 32
+               - tunnel->ip6rd.relay_prefixlen;
+#else
+       prefix_len = 48;
+#endif
+       return ipv6_chk_custom_prefix(v6dst, prefix_len, tunnel->dev);
+}
+
+/* Returns true if a packet is spoofed */
+static bool packet_is_spoofed(struct sk_buff *skb,
+                             const struct iphdr *iph,
+                             struct ip_tunnel *tunnel)
+{
+       const struct ipv6hdr *ipv6h;
+
+       if (tunnel->dev->priv_flags & IFF_ISATAP) {
+               if (!isatap_chksrc(skb, iph, tunnel))
+                       return true;
+
+               return false;
+       }
+
+       if (tunnel->dev->flags & IFF_POINTOPOINT)
+               return false;
+
+       ipv6h = ipv6_hdr(skb);
+
+       if (unlikely(is_spoofed_6rd(tunnel, iph->saddr, &ipv6h->saddr))) {
+               net_warn_ratelimited("Src spoofed %pI4/%pI6c -> %pI4/%pI6c\n",
+                                    &iph->saddr, &ipv6h->saddr,
+                                    &iph->daddr, &ipv6h->daddr);
+               return true;
+       }
+
+       if (likely(!is_spoofed_6rd(tunnel, iph->daddr, &ipv6h->daddr)))
+               return false;
+
+       if (only_dnatted(tunnel, &ipv6h->daddr))
+               return false;
+
+       net_warn_ratelimited("Dst spoofed %pI4/%pI6c -> %pI4/%pI6c\n",
+                            &iph->saddr, &ipv6h->saddr,
+                            &iph->daddr, &ipv6h->daddr);
+       return true;
+}
+
 static int ipip6_rcv(struct sk_buff *skb)
 {
        const struct iphdr *iph = ip_hdr(skb);
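The prefix length used by only_dnatted() covers the bits that identify the tunnel rather than the end host: with 6rd it is ip6rd.prefixlen + 32 - relay_prefixlen, so for example a 6rd prefix of length 32 and a 0.0.0.0/0 relay prefix give 32 + 32 - 0 = 64 bits to compare, while without 6rd the classic 2002::/16 plus the 32 embedded IPv4 bits give the fixed 48.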
@@ -586,19 +650,9 @@ static int ipip6_rcv(struct sk_buff *skb)
                IPCB(skb)->flags = 0;
                skb->protocol = htons(ETH_P_IPV6);
 
-               if (tunnel->dev->priv_flags & IFF_ISATAP) {
-                       if (!isatap_chksrc(skb, iph, tunnel)) {
-                               tunnel->dev->stats.rx_errors++;
-                               goto out;
-                       }
-               } else if (!(tunnel->dev->flags&IFF_POINTOPOINT)) {
-                       if (is_spoofed_6rd(tunnel, iph->saddr,
-                                          &ipv6_hdr(skb)->saddr) ||
-                           is_spoofed_6rd(tunnel, iph->daddr,
-                                          &ipv6_hdr(skb)->daddr)) {
-                               tunnel->dev->stats.rx_errors++;
-                               goto out;
-                       }
+               if (packet_is_spoofed(skb, iph, tunnel)) {
+                       tunnel->dev->stats.rx_errors++;
+                       goto out;
                }
 
                __skb_tunnel_rx(skb, tunnel->dev, tunnel->net);
@@ -748,7 +802,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
                        neigh = dst_neigh_lookup(skb_dst(skb), &iph6->daddr);
 
                if (neigh == NULL) {
-                       net_dbg_ratelimited("sit: nexthop == NULL\n");
+                       net_dbg_ratelimited("nexthop == NULL\n");
                        goto tx_error;
                }
 
@@ -777,7 +831,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
                        neigh = dst_neigh_lookup(skb_dst(skb), &iph6->daddr);
 
                if (neigh == NULL) {
-                       net_dbg_ratelimited("sit: nexthop == NULL\n");
+                       net_dbg_ratelimited("nexthop == NULL\n");
                        goto tx_error;
                }
 
@@ -1612,6 +1666,7 @@ static int __net_init sit_init_net(struct net *net)
                goto err_alloc_dev;
        }
        dev_net_set(sitn->fb_tunnel_dev, net);
+       sitn->fb_tunnel_dev->rtnl_link_ops = &sit_link_ops;
        /* FB netdevice is special: we have one, and only one per netns.
         * Allowing to move it to another netns is clearly unsafe.
         */
@@ -1646,7 +1701,6 @@ static void __net_exit sit_exit_net(struct net *net)
 
        rtnl_lock();
        sit_destroy_tunnels(sitn, &list);
-       unregister_netdevice_queue(sitn->fb_tunnel_dev, &list);
        unregister_netdevice_many(&list);
        rtnl_unlock();
 }
index 5c71501fc917d6271f72cea50cb98b9ad783f1c4..dde8bad0448178429630e1b739b31a3da04f26a6 100644 (file)
@@ -1811,7 +1811,7 @@ static void get_timewait6_sock(struct seq_file *seq,
        const struct in6_addr *dest, *src;
        __u16 destp, srcp;
        const struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
-       long delta = tw->tw_ttd - jiffies;
+       s32 delta = tw->tw_ttd - inet_tw_time_stamp();
 
        dest = &tw6->tw_v6_daddr;
        src  = &tw6->tw_v6_rcv_saddr;
index f4058150262b111d316a423e1c9ddc7ee8bb29a0..8119791e8a95317ddd880679d59bf0730828c1ca 100644 (file)
@@ -57,8 +57,6 @@ int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
 {
        const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
        const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
-       __be32 sk1_rcv_saddr = sk_rcv_saddr(sk);
-       __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
        int sk_ipv6only = ipv6_only_sock(sk);
        int sk2_ipv6only = inet_v6_ipv6only(sk2);
        int addr_type = ipv6_addr_type(sk_rcv_saddr6);
@@ -67,8 +65,8 @@ int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
        /* if both are mapped, treat as IPv4 */
        if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED)
                return (!sk2_ipv6only &&
-                       (!sk1_rcv_saddr || !sk2_rcv_saddr ||
-                         sk1_rcv_saddr == sk2_rcv_saddr));
+                       (!sk->sk_rcv_saddr || !sk2->sk_rcv_saddr ||
+                         sk->sk_rcv_saddr == sk2->sk_rcv_saddr));
 
        if (addr_type2 == IPV6_ADDR_ANY &&
            !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))
@@ -525,8 +523,10 @@ void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 
        if (type == ICMPV6_PKT_TOOBIG)
                ip6_sk_update_pmtu(skb, sk, info);
-       if (type == NDISC_REDIRECT)
+       if (type == NDISC_REDIRECT) {
                ip6_sk_redirect(skb, sk);
+               goto out;
+       }
 
        np = inet6_sk(sk);
 
index 54563ad8aeb1f02bbedd18959c8cf9cd77479942..355cc3b6fa4d3e4040bf9fb1578fc43c6c4f7b52 100644 (file)
@@ -154,6 +154,7 @@ static void lapb_t1timer_expiry(unsigned long param)
                        } else {
                                lapb->n2count++;
                                lapb_requeue_frames(lapb);
+                               lapb_kick(lapb);
                        }
                        break;
 
index 1aba645882bd92abbf9e5a4cdd90f7544a173c4a..3fb9dd6d02fc6df636ef84ed5ffea988598a5df6 100644 (file)
@@ -77,13 +77,13 @@ DECLARE_EVENT_CLASS(local_sdata_addr_evt,
        TP_STRUCT__entry(
                LOCAL_ENTRY
                VIF_ENTRY
-               __array(char, addr, 6)
+               __array(char, addr, ETH_ALEN)
        ),
 
        TP_fast_assign(
                LOCAL_ASSIGN;
                VIF_ASSIGN;
-               memcpy(__entry->addr, sdata->vif.addr, 6);
+               memcpy(__entry->addr, sdata->vif.addr, ETH_ALEN);
        ),
 
        TP_printk(
index 4f69e83ff836b0ec415342772055a58a52c25fc4..74fd00c272100d5271533689abf074c5f9e8c454 100644 (file)
@@ -116,6 +116,7 @@ ip_vs_in_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
 
        if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
                struct ip_vs_cpu_stats *s;
+               struct ip_vs_service *svc;
 
                s = this_cpu_ptr(dest->stats.cpustats);
                s->ustats.inpkts++;
@@ -123,11 +124,14 @@ ip_vs_in_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
                s->ustats.inbytes += skb->len;
                u64_stats_update_end(&s->syncp);
 
-               s = this_cpu_ptr(dest->svc->stats.cpustats);
+               rcu_read_lock();
+               svc = rcu_dereference(dest->svc);
+               s = this_cpu_ptr(svc->stats.cpustats);
                s->ustats.inpkts++;
                u64_stats_update_begin(&s->syncp);
                s->ustats.inbytes += skb->len;
                u64_stats_update_end(&s->syncp);
+               rcu_read_unlock();
 
                s = this_cpu_ptr(ipvs->tot_stats.cpustats);
                s->ustats.inpkts++;
@@ -146,6 +150,7 @@ ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
 
        if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
                struct ip_vs_cpu_stats *s;
+               struct ip_vs_service *svc;
 
                s = this_cpu_ptr(dest->stats.cpustats);
                s->ustats.outpkts++;
@@ -153,11 +158,14 @@ ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
                s->ustats.outbytes += skb->len;
                u64_stats_update_end(&s->syncp);
 
-               s = this_cpu_ptr(dest->svc->stats.cpustats);
+               rcu_read_lock();
+               svc = rcu_dereference(dest->svc);
+               s = this_cpu_ptr(svc->stats.cpustats);
                s->ustats.outpkts++;
                u64_stats_update_begin(&s->syncp);
                s->ustats.outbytes += skb->len;
                u64_stats_update_end(&s->syncp);
+               rcu_read_unlock();
 
                s = this_cpu_ptr(ipvs->tot_stats.cpustats);
                s->ustats.outpkts++;
index c8148e48738657d63810a1dade924267dc032d01..a3df9bddc4f76251a8722792d8e9f15478546ac1 100644 (file)
@@ -460,7 +460,7 @@ static inline void
 __ip_vs_bind_svc(struct ip_vs_dest *dest, struct ip_vs_service *svc)
 {
        atomic_inc(&svc->refcnt);
-       dest->svc = svc;
+       rcu_assign_pointer(dest->svc, svc);
 }
 
 static void ip_vs_service_free(struct ip_vs_service *svc)
@@ -470,18 +470,25 @@ static void ip_vs_service_free(struct ip_vs_service *svc)
        kfree(svc);
 }
 
-static void
-__ip_vs_unbind_svc(struct ip_vs_dest *dest)
+static void ip_vs_service_rcu_free(struct rcu_head *head)
 {
-       struct ip_vs_service *svc = dest->svc;
+       struct ip_vs_service *svc;
+
+       svc = container_of(head, struct ip_vs_service, rcu_head);
+       ip_vs_service_free(svc);
+}
 
-       dest->svc = NULL;
+static void __ip_vs_svc_put(struct ip_vs_service *svc, bool do_delay)
+{
        if (atomic_dec_and_test(&svc->refcnt)) {
                IP_VS_DBG_BUF(3, "Removing service %u/%s:%u\n",
                              svc->fwmark,
                              IP_VS_DBG_ADDR(svc->af, &svc->addr),
                              ntohs(svc->port));
-               ip_vs_service_free(svc);
+               if (do_delay)
+                       call_rcu(&svc->rcu_head, ip_vs_service_rcu_free);
+               else
+                       ip_vs_service_free(svc);
        }
 }
 
@@ -667,11 +674,6 @@ ip_vs_trash_get_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr,
                              IP_VS_DBG_ADDR(svc->af, &dest->addr),
                              ntohs(dest->port),
                              atomic_read(&dest->refcnt));
-               /* We can not reuse dest while in grace period
-                * because conns still can use dest->svc
-                */
-               if (test_bit(IP_VS_DEST_STATE_REMOVING, &dest->state))
-                       continue;
                if (dest->af == svc->af &&
                    ip_vs_addr_equal(svc->af, &dest->addr, daddr) &&
                    dest->port == dport &&
@@ -697,8 +699,10 @@ out:
 
 static void ip_vs_dest_free(struct ip_vs_dest *dest)
 {
+       struct ip_vs_service *svc = rcu_dereference_protected(dest->svc, 1);
+
        __ip_vs_dst_cache_reset(dest);
-       __ip_vs_unbind_svc(dest);
+       __ip_vs_svc_put(svc, false);
        free_percpu(dest->stats.cpustats);
        kfree(dest);
 }
@@ -771,6 +775,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
                    struct ip_vs_dest_user_kern *udest, int add)
 {
        struct netns_ipvs *ipvs = net_ipvs(svc->net);
+       struct ip_vs_service *old_svc;
        struct ip_vs_scheduler *sched;
        int conn_flags;
 
@@ -792,13 +797,14 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
        atomic_set(&dest->conn_flags, conn_flags);
 
        /* bind the service */
-       if (!dest->svc) {
+       old_svc = rcu_dereference_protected(dest->svc, 1);
+       if (!old_svc) {
                __ip_vs_bind_svc(dest, svc);
        } else {
-               if (dest->svc != svc) {
-                       __ip_vs_unbind_svc(dest);
+               if (old_svc != svc) {
                        ip_vs_zero_stats(&dest->stats);
                        __ip_vs_bind_svc(dest, svc);
+                       __ip_vs_svc_put(old_svc, true);
                }
        }
 
@@ -998,16 +1004,6 @@ ip_vs_edit_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
        return 0;
 }
 
-static void ip_vs_dest_wait_readers(struct rcu_head *head)
-{
-       struct ip_vs_dest *dest = container_of(head, struct ip_vs_dest,
-                                              rcu_head);
-
-       /* End of grace period after unlinking */
-       clear_bit(IP_VS_DEST_STATE_REMOVING, &dest->state);
-}
-
-
 /*
  *     Delete a destination (must be already unlinked from the service)
  */
@@ -1023,20 +1019,16 @@ static void __ip_vs_del_dest(struct net *net, struct ip_vs_dest *dest,
         */
        ip_vs_rs_unhash(dest);
 
-       if (!cleanup) {
-               set_bit(IP_VS_DEST_STATE_REMOVING, &dest->state);
-               call_rcu(&dest->rcu_head, ip_vs_dest_wait_readers);
-       }
-
        spin_lock_bh(&ipvs->dest_trash_lock);
        IP_VS_DBG_BUF(3, "Moving dest %s:%u into trash, dest->refcnt=%d\n",
                      IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port),
                      atomic_read(&dest->refcnt));
        if (list_empty(&ipvs->dest_trash) && !cleanup)
                mod_timer(&ipvs->dest_trash_timer,
-                         jiffies + IP_VS_DEST_TRASH_PERIOD);
+                         jiffies + (IP_VS_DEST_TRASH_PERIOD >> 1));
        /* dest lives in trash without reference */
        list_add(&dest->t_list, &ipvs->dest_trash);
+       dest->idle_start = 0;
        spin_unlock_bh(&ipvs->dest_trash_lock);
        ip_vs_dest_put(dest);
 }
@@ -1108,24 +1100,30 @@ static void ip_vs_dest_trash_expire(unsigned long data)
        struct net *net = (struct net *) data;
        struct netns_ipvs *ipvs = net_ipvs(net);
        struct ip_vs_dest *dest, *next;
+       unsigned long now = jiffies;
 
        spin_lock(&ipvs->dest_trash_lock);
        list_for_each_entry_safe(dest, next, &ipvs->dest_trash, t_list) {
-               /* Skip if dest is in grace period */
-               if (test_bit(IP_VS_DEST_STATE_REMOVING, &dest->state))
-                       continue;
                if (atomic_read(&dest->refcnt) > 0)
                        continue;
+               if (dest->idle_start) {
+                       if (time_before(now, dest->idle_start +
+                                            IP_VS_DEST_TRASH_PERIOD))
+                               continue;
+               } else {
+                       dest->idle_start = max(1UL, now);
+                       continue;
+               }
                IP_VS_DBG_BUF(3, "Removing destination %u/%s:%u from trash\n",
                              dest->vfwmark,
-                             IP_VS_DBG_ADDR(dest->svc->af, &dest->addr),
+                             IP_VS_DBG_ADDR(dest->af, &dest->addr),
                              ntohs(dest->port));
                list_del(&dest->t_list);
                ip_vs_dest_free(dest);
        }
        if (!list_empty(&ipvs->dest_trash))
                mod_timer(&ipvs->dest_trash_timer,
-                         jiffies + IP_VS_DEST_TRASH_PERIOD);
+                         jiffies + (IP_VS_DEST_TRASH_PERIOD >> 1));
        spin_unlock(&ipvs->dest_trash_lock);
 }
 
@@ -1320,14 +1318,6 @@ out:
        return ret;
 }
 
-static void ip_vs_service_rcu_free(struct rcu_head *head)
-{
-       struct ip_vs_service *svc;
-
-       svc = container_of(head, struct ip_vs_service, rcu_head);
-       ip_vs_service_free(svc);
-}
-
 /*
  *     Delete a service from the service list
  *     - The service must be unlinked, unlocked and not referenced!
@@ -1376,13 +1366,7 @@ static void __ip_vs_del_service(struct ip_vs_service *svc, bool cleanup)
        /*
         *    Free the service if nobody refers to it
         */
-       if (atomic_dec_and_test(&svc->refcnt)) {
-               IP_VS_DBG_BUF(3, "Removing service %u/%s:%u\n",
-                             svc->fwmark,
-                             IP_VS_DBG_ADDR(svc->af, &svc->addr),
-                             ntohs(svc->port));
-               call_rcu(&svc->rcu_head, ip_vs_service_rcu_free);
-       }
+       __ip_vs_svc_put(svc, true);
 
        /* decrease the module use count */
        ip_vs_use_count_dec();
index 6bee6d0c73a52e93e1413162b4db971340fa3312..1425e9a924c4f64429637bc49cbde204b0bb1921 100644 (file)
@@ -59,12 +59,13 @@ static void ip_vs_read_cpu_stats(struct ip_vs_stats_user *sum,
                                 struct ip_vs_cpu_stats __percpu *stats)
 {
        int i;
+       bool add = false;
 
        for_each_possible_cpu(i) {
                struct ip_vs_cpu_stats *s = per_cpu_ptr(stats, i);
                unsigned int start;
                __u64 inbytes, outbytes;
-               if (i) {
+               if (add) {
                        sum->conns += s->ustats.conns;
                        sum->inpkts += s->ustats.inpkts;
                        sum->outpkts += s->ustats.outpkts;
@@ -76,6 +77,7 @@ static void ip_vs_read_cpu_stats(struct ip_vs_stats_user *sum,
                        sum->inbytes += inbytes;
                        sum->outbytes += outbytes;
                } else {
+                       add = true;
                        sum->conns = s->ustats.conns;
                        sum->inpkts = s->ustats.inpkts;
                        sum->outpkts = s->ustats.outpkts;
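The bool makes the first CPU actually iterated seed the sums, instead of assuming that CPU 0 is present and comes first in the possible mask. For instance, if the possible CPUs were {2, 3}, the old "if (i)" test would be true on every pass and the totals would only ever be accumulated on top of whatever the caller left in *sum; with "add", the first pass always initializes the sums and later passes accumulate.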
index 1383b0eadc0e777d5ff6dbce2a2ded451a3ae296..eff13c94498e068173c66b6a481a6264d038ffea 100644 (file)
@@ -93,7 +93,7 @@ struct ip_vs_lblc_entry {
        struct hlist_node       list;
        int                     af;             /* address family */
        union nf_inet_addr      addr;           /* destination IP address */
-       struct ip_vs_dest __rcu *dest;          /* real server (cache) */
+       struct ip_vs_dest       *dest;          /* real server (cache) */
        unsigned long           lastuse;        /* last used time */
        struct rcu_head         rcu_head;
 };
@@ -130,20 +130,21 @@ static struct ctl_table vs_vars_table[] = {
 };
 #endif
 
-static inline void ip_vs_lblc_free(struct ip_vs_lblc_entry *en)
+static void ip_vs_lblc_rcu_free(struct rcu_head *head)
 {
-       struct ip_vs_dest *dest;
+       struct ip_vs_lblc_entry *en = container_of(head,
+                                                  struct ip_vs_lblc_entry,
+                                                  rcu_head);
 
-       hlist_del_rcu(&en->list);
-       /*
-        * We don't kfree dest because it is referred either by its service
-        * or the trash dest list.
-        */
-       dest = rcu_dereference_protected(en->dest, 1);
-       ip_vs_dest_put(dest);
-       kfree_rcu(en, rcu_head);
+       ip_vs_dest_put(en->dest);
+       kfree(en);
 }
 
+static inline void ip_vs_lblc_del(struct ip_vs_lblc_entry *en)
+{
+       hlist_del_rcu(&en->list);
+       call_rcu(&en->rcu_head, ip_vs_lblc_rcu_free);
+}
 
 /*
  *     Returns hash value for IPVS LBLC entry
@@ -203,30 +204,23 @@ ip_vs_lblc_new(struct ip_vs_lblc_table *tbl, const union nf_inet_addr *daddr,
        struct ip_vs_lblc_entry *en;
 
        en = ip_vs_lblc_get(dest->af, tbl, daddr);
-       if (!en) {
-               en = kmalloc(sizeof(*en), GFP_ATOMIC);
-               if (!en)
-                       return NULL;
-
-               en->af = dest->af;
-               ip_vs_addr_copy(dest->af, &en->addr, daddr);
-               en->lastuse = jiffies;
+       if (en) {
+               if (en->dest == dest)
+                       return en;
+               ip_vs_lblc_del(en);
+       }
+       en = kmalloc(sizeof(*en), GFP_ATOMIC);
+       if (!en)
+               return NULL;
 
-               ip_vs_dest_hold(dest);
-               RCU_INIT_POINTER(en->dest, dest);
+       en->af = dest->af;
+       ip_vs_addr_copy(dest->af, &en->addr, daddr);
+       en->lastuse = jiffies;
 
-               ip_vs_lblc_hash(tbl, en);
-       } else {
-               struct ip_vs_dest *old_dest;
+       ip_vs_dest_hold(dest);
+       en->dest = dest;
 
-               old_dest = rcu_dereference_protected(en->dest, 1);
-               if (old_dest != dest) {
-                       ip_vs_dest_put(old_dest);
-                       ip_vs_dest_hold(dest);
-                       /* No ordering constraints for refcnt */
-                       RCU_INIT_POINTER(en->dest, dest);
-               }
-       }
+       ip_vs_lblc_hash(tbl, en);
 
        return en;
 }
@@ -246,7 +240,7 @@ static void ip_vs_lblc_flush(struct ip_vs_service *svc)
        tbl->dead = 1;
        for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) {
                hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) {
-                       ip_vs_lblc_free(en);
+                       ip_vs_lblc_del(en);
                        atomic_dec(&tbl->entries);
                }
        }
@@ -281,7 +275,7 @@ static inline void ip_vs_lblc_full_check(struct ip_vs_service *svc)
                                        sysctl_lblc_expiration(svc)))
                                continue;
 
-                       ip_vs_lblc_free(en);
+                       ip_vs_lblc_del(en);
                        atomic_dec(&tbl->entries);
                }
                spin_unlock(&svc->sched_lock);
@@ -335,7 +329,7 @@ static void ip_vs_lblc_check_expire(unsigned long data)
                        if (time_before(now, en->lastuse + ENTRY_TIMEOUT))
                                continue;
 
-                       ip_vs_lblc_free(en);
+                       ip_vs_lblc_del(en);
                        atomic_dec(&tbl->entries);
                        goal--;
                }
@@ -443,8 +437,8 @@ __ip_vs_lblc_schedule(struct ip_vs_service *svc)
                        continue;
 
                doh = ip_vs_dest_conn_overhead(dest);
-               if (loh * atomic_read(&dest->weight) >
-                   doh * atomic_read(&least->weight)) {
+               if ((__s64)loh * atomic_read(&dest->weight) >
+                   (__s64)doh * atomic_read(&least->weight)) {
                        least = dest;
                        loh = doh;
                }
@@ -511,7 +505,7 @@ ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
                 * free up entries from the trash at any time.
                 */
 
-               dest = rcu_dereference(en->dest);
+               dest = en->dest;
                if ((dest->flags & IP_VS_DEST_F_AVAILABLE) &&
                    atomic_read(&dest->weight) > 0 && !is_overloaded(dest, svc))
                        goto out;
@@ -631,7 +625,7 @@ static void __exit ip_vs_lblc_cleanup(void)
 {
        unregister_ip_vs_scheduler(&ip_vs_lblc_scheduler);
        unregister_pernet_subsys(&ip_vs_lblc_ops);
-       synchronize_rcu();
+       rcu_barrier();
 }
 
 
index 5199448697f64fcf0da0e35dbc38903477a92861..0b8550089a2e580e7feba0723117f1c849048a39 100644 (file)
@@ -89,7 +89,7 @@
  */
 struct ip_vs_dest_set_elem {
        struct list_head        list;          /* list link */
-       struct ip_vs_dest __rcu *dest;         /* destination server */
+       struct ip_vs_dest       *dest;          /* destination server */
        struct rcu_head         rcu_head;
 };
 
@@ -107,11 +107,7 @@ static void ip_vs_dest_set_insert(struct ip_vs_dest_set *set,
 
        if (check) {
                list_for_each_entry(e, &set->list, list) {
-                       struct ip_vs_dest *d;
-
-                       d = rcu_dereference_protected(e->dest, 1);
-                       if (d == dest)
-                               /* already existed */
+                       if (e->dest == dest)
                                return;
                }
        }
@@ -121,7 +117,7 @@ static void ip_vs_dest_set_insert(struct ip_vs_dest_set *set,
                return;
 
        ip_vs_dest_hold(dest);
-       RCU_INIT_POINTER(e->dest, dest);
+       e->dest = dest;
 
        list_add_rcu(&e->list, &set->list);
        atomic_inc(&set->size);
@@ -129,22 +125,27 @@ static void ip_vs_dest_set_insert(struct ip_vs_dest_set *set,
        set->lastmod = jiffies;
 }
 
+static void ip_vs_lblcr_elem_rcu_free(struct rcu_head *head)
+{
+       struct ip_vs_dest_set_elem *e;
+
+       e = container_of(head, struct ip_vs_dest_set_elem, rcu_head);
+       ip_vs_dest_put(e->dest);
+       kfree(e);
+}
+
 static void
 ip_vs_dest_set_erase(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
 {
        struct ip_vs_dest_set_elem *e;
 
        list_for_each_entry(e, &set->list, list) {
-               struct ip_vs_dest *d;
-
-               d = rcu_dereference_protected(e->dest, 1);
-               if (d == dest) {
+               if (e->dest == dest) {
                        /* HIT */
                        atomic_dec(&set->size);
                        set->lastmod = jiffies;
-                       ip_vs_dest_put(dest);
                        list_del_rcu(&e->list);
-                       kfree_rcu(e, rcu_head);
+                       call_rcu(&e->rcu_head, ip_vs_lblcr_elem_rcu_free);
                        break;
                }
        }
@@ -155,16 +156,8 @@ static void ip_vs_dest_set_eraseall(struct ip_vs_dest_set *set)
        struct ip_vs_dest_set_elem *e, *ep;
 
        list_for_each_entry_safe(e, ep, &set->list, list) {
-               struct ip_vs_dest *d;
-
-               d = rcu_dereference_protected(e->dest, 1);
-               /*
-                * We don't kfree dest because it is referred either
-                * by its service or by the trash dest list.
-                */
-               ip_vs_dest_put(d);
                list_del_rcu(&e->list);
-               kfree_rcu(e, rcu_head);
+               call_rcu(&e->rcu_head, ip_vs_lblcr_elem_rcu_free);
        }
 }
 
@@ -175,12 +168,9 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
        struct ip_vs_dest *dest, *least;
        int loh, doh;
 
-       if (set == NULL)
-               return NULL;
-
        /* select the first destination server, whose weight > 0 */
        list_for_each_entry_rcu(e, &set->list, list) {
-               least = rcu_dereference(e->dest);
+               least = e->dest;
                if (least->flags & IP_VS_DEST_F_OVERLOAD)
                        continue;
 
@@ -195,13 +185,13 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
        /* find the destination with the weighted least load */
   nextstage:
        list_for_each_entry_continue_rcu(e, &set->list, list) {
-               dest = rcu_dereference(e->dest);
+               dest = e->dest;
                if (dest->flags & IP_VS_DEST_F_OVERLOAD)
                        continue;
 
                doh = ip_vs_dest_conn_overhead(dest);
-               if ((loh * atomic_read(&dest->weight) >
-                    doh * atomic_read(&least->weight))
+               if (((__s64)loh * atomic_read(&dest->weight) >
+                    (__s64)doh * atomic_read(&least->weight))
                    && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
                        least = dest;
                        loh = doh;
@@ -232,7 +222,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
 
        /* select the first destination server, whose weight > 0 */
        list_for_each_entry(e, &set->list, list) {
-               most = rcu_dereference_protected(e->dest, 1);
+               most = e->dest;
                if (atomic_read(&most->weight) > 0) {
                        moh = ip_vs_dest_conn_overhead(most);
                        goto nextstage;
@@ -243,11 +233,11 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
        /* find the destination with the weighted most load */
   nextstage:
        list_for_each_entry_continue(e, &set->list, list) {
-               dest = rcu_dereference_protected(e->dest, 1);
+               dest = e->dest;
                doh = ip_vs_dest_conn_overhead(dest);
                /* moh/mw < doh/dw ==> moh*dw < doh*mw, where mw,dw>0 */
-               if ((moh * atomic_read(&dest->weight) <
-                    doh * atomic_read(&most->weight))
+               if (((__s64)moh * atomic_read(&dest->weight) <
+                    (__s64)doh * atomic_read(&most->weight))
                    && (atomic_read(&dest->weight) > 0)) {
                        most = dest;
                        moh = doh;
@@ -611,8 +601,8 @@ __ip_vs_lblcr_schedule(struct ip_vs_service *svc)
                        continue;
 
                doh = ip_vs_dest_conn_overhead(dest);
-               if (loh * atomic_read(&dest->weight) >
-                   doh * atomic_read(&least->weight)) {
+               if ((__s64)loh * atomic_read(&dest->weight) >
+                   (__s64)doh * atomic_read(&least->weight)) {
                        least = dest;
                        loh = doh;
                }
@@ -819,7 +809,7 @@ static void __exit ip_vs_lblcr_cleanup(void)
 {
        unregister_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
        unregister_pernet_subsys(&ip_vs_lblcr_ops);
-       synchronize_rcu();
+       rcu_barrier();
 }
 
 
index d8d9860934fee1e7f59505eeefa094307b2d58c3..961a6de9bb29035458945185f488a5fc1209ba00 100644 (file)
@@ -40,7 +40,7 @@
 #include <net/ip_vs.h>
 
 
-static inline unsigned int
+static inline int
 ip_vs_nq_dest_overhead(struct ip_vs_dest *dest)
 {
        /*
@@ -59,7 +59,7 @@ ip_vs_nq_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
                  struct ip_vs_iphdr *iph)
 {
        struct ip_vs_dest *dest, *least = NULL;
-       unsigned int loh = 0, doh;
+       int loh = 0, doh;
 
        IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);
 
@@ -92,8 +92,8 @@ ip_vs_nq_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
                }
 
                if (!least ||
-                   (loh * atomic_read(&dest->weight) >
-                    doh * atomic_read(&least->weight))) {
+                   ((__s64)loh * atomic_read(&dest->weight) >
+                    (__s64)doh * atomic_read(&least->weight))) {
                        least = dest;
                        loh = doh;
                }
index a5284cc3d88279923b6a28b1f8a8f87bfc2a6a0f..e446b9fa7424c6382cb65433447f3febe2b17eeb 100644 (file)
@@ -44,7 +44,7 @@
 #include <net/ip_vs.h>
 
 
-static inline unsigned int
+static inline int
 ip_vs_sed_dest_overhead(struct ip_vs_dest *dest)
 {
        /*
@@ -63,7 +63,7 @@ ip_vs_sed_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
                   struct ip_vs_iphdr *iph)
 {
        struct ip_vs_dest *dest, *least;
-       unsigned int loh, doh;
+       int loh, doh;
 
        IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);
 
@@ -99,8 +99,8 @@ ip_vs_sed_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
                if (dest->flags & IP_VS_DEST_F_OVERLOAD)
                        continue;
                doh = ip_vs_sed_dest_overhead(dest);
-               if (loh * atomic_read(&dest->weight) >
-                   doh * atomic_read(&least->weight)) {
+               if ((__s64)loh * atomic_read(&dest->weight) >
+                   (__s64)doh * atomic_read(&least->weight)) {
                        least = dest;
                        loh = doh;
                }
index 6dc1fa1288409067de8a19f53a32e174caa9adc9..b5b4650d50a9180f211e6cce82e3393ece2fc11c 100644 (file)
@@ -35,7 +35,7 @@ ip_vs_wlc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
                   struct ip_vs_iphdr *iph)
 {
        struct ip_vs_dest *dest, *least;
-       unsigned int loh, doh;
+       int loh, doh;
 
        IP_VS_DBG(6, "ip_vs_wlc_schedule(): Scheduling...\n");
 
@@ -71,8 +71,8 @@ ip_vs_wlc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
                if (dest->flags & IP_VS_DEST_F_OVERLOAD)
                        continue;
                doh = ip_vs_dest_conn_overhead(dest);
-               if (loh * atomic_read(&dest->weight) >
-                   doh * atomic_read(&least->weight)) {
+               if ((__s64)loh * atomic_read(&dest->weight) >
+                   (__s64)doh * atomic_read(&least->weight)) {
                        least = dest;
                        loh = doh;
                }
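
The (__s64) casts added to the weighted comparison here and in the lblc, lblcr, nq and sed schedulers above address the same problem: overhead * weight is a product of two 32-bit values and can wrap, after which the weighted least-connection test picks the wrong destination. A standalone sketch with made-up overheads and weights (the overheads are kept 32-bit unsigned, as in the pre-patch code, so the wrap is well defined for the demonstration; long long stands in for __s64 in userspace):

#include <stdio.h>

int main(void)
{
	unsigned int loh = 43000, doh = 1000;	/* hypothetical overheads */
	int wd = 100000, wl = 100000;		/* hypothetical weights */

	/* 32-bit: 43000 * 100000 wraps mod 2^32 and the test comes out false */
	printf("32-bit compare: %d\n", loh * wd > doh * wl);

	/* 64-bit: casting one operand widens the multiplication before it wraps */
	printf("64-bit compare: %d\n",
	       (long long)loh * wd > (long long)doh * wl);
	return 0;
}
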
index 6fd967c6278c86fbc81ad232035891656750330a..cdf4567ba9b330929aec53eb1c75d57a7047106e 100644 (file)
@@ -24,7 +24,7 @@
 int synproxy_net_id;
 EXPORT_SYMBOL_GPL(synproxy_net_id);
 
-void
+bool
 synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
                       const struct tcphdr *th, struct synproxy_options *opts)
 {
@@ -32,7 +32,8 @@ synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
        u8 buf[40], *ptr;
 
        ptr = skb_header_pointer(skb, doff + sizeof(*th), length, buf);
-       BUG_ON(ptr == NULL);
+       if (ptr == NULL)
+               return false;
 
        opts->options = 0;
        while (length > 0) {
@@ -41,16 +42,16 @@ synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
 
                switch (opcode) {
                case TCPOPT_EOL:
-                       return;
+                       return true;
                case TCPOPT_NOP:
                        length--;
                        continue;
                default:
                        opsize = *ptr++;
                        if (opsize < 2)
-                               return;
+                               return true;
                        if (opsize > length)
-                               return;
+                               return true;
 
                        switch (opcode) {
                        case TCPOPT_MSS:
@@ -84,6 +85,7 @@ synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
                        length -= opsize;
                }
        }
+       return true;
 }
 EXPORT_SYMBOL_GPL(synproxy_parse_options);
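
With synproxy_parse_options() returning bool, a caller can now drop a packet whose TCP option block cannot be pulled into the local buffer instead of hitting the old BUG_ON(). A hypothetical caller-side sketch (the hook function name is invented; the parse function signature is the one introduced above):

static unsigned int example_synproxy_hook(struct sk_buff *skb,
					  unsigned int doff,
					  const struct tcphdr *th)
{
	struct synproxy_options opts = {};

	/* truncated or otherwise unreadable options: reject the packet */
	if (!synproxy_parse_options(skb, doff, th, &opts))
		return NF_DROP;

	/* ... continue with opts.mss, opts.wscale, opts.options ... */
	return NF_ACCEPT;
}
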
 
index a481c03e2861f7bcc1de1ec65b56cbc142a2f73d..56e22b74cf965f57ceb90ad0be84b378bbac093f 100644 (file)
@@ -173,7 +173,7 @@ static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
 
        skb->local_df = 1;
 
-       inet_get_local_port_range(&port_min, &port_max);
+       inet_get_local_port_range(net, &port_min, &port_max);
        src_port = vxlan_src_port(port_min, port_max, skb);
 
        err = vxlan_xmit_skb(vxlan_port->vs, rt, skb,
index d76a35d0dc85b82ddd9b8f2cc037b1ff0d31f5d5..636d9131d87016a46597af5cadd3471d21e1798f 100644 (file)
@@ -137,7 +137,7 @@ static int basic_set_parms(struct net *net, struct tcf_proto *tp,
                           struct nlattr **tb,
                           struct nlattr *est)
 {
-       int err = -EINVAL;
+       int err;
        struct tcf_exts e;
        struct tcf_ematch_tree t;
 
index 7c3de6ffa5164db0f7abd3f0e2cc0ca2e92ecda9..e5cef95672256b650249c80bef36930f78be9f13 100644 (file)
@@ -793,8 +793,10 @@ static int em_meta_change(struct tcf_proto *tp, void *data, int len,
                goto errout;
 
        meta = kzalloc(sizeof(*meta), GFP_KERNEL);
-       if (meta == NULL)
+       if (meta == NULL) {
+               err = -ENOMEM;
                goto errout;
+       }
 
        memcpy(&meta->lvalue.hdr, &hdr->left, sizeof(hdr->left));
        memcpy(&meta->rvalue.hdr, &hdr->right, sizeof(hdr->right));
index 32ad015ee8ce4a9c5b967c22dd90631881f2362b..a2fef8b10b960c15d29556b7dc832737c848b3b6 100644 (file)
@@ -285,7 +285,7 @@ static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
 
 
 /* remove one skb from head of flow queue */
-static struct sk_buff *fq_dequeue_head(struct fq_flow *flow)
+static struct sk_buff *fq_dequeue_head(struct Qdisc *sch, struct fq_flow *flow)
 {
        struct sk_buff *skb = flow->head;
 
@@ -293,6 +293,8 @@ static struct sk_buff *fq_dequeue_head(struct fq_flow *flow)
                flow->head = skb->next;
                skb->next = NULL;
                flow->qlen--;
+               sch->qstats.backlog -= qdisc_pkt_len(skb);
+               sch->q.qlen--;
        }
        return skb;
 }
@@ -418,8 +420,9 @@ static struct sk_buff *fq_dequeue(struct Qdisc *sch)
        struct fq_flow_head *head;
        struct sk_buff *skb;
        struct fq_flow *f;
+       u32 rate;
 
-       skb = fq_dequeue_head(&q->internal);
+       skb = fq_dequeue_head(sch, &q->internal);
        if (skb)
                goto out;
        fq_check_throttled(q, now);
@@ -449,7 +452,7 @@ begin:
                goto begin;
        }
 
-       skb = fq_dequeue_head(f);
+       skb = fq_dequeue_head(sch, f);
        if (!skb) {
                head->first = f->next;
                /* force a pass through old_flows to prevent starvation */
@@ -466,43 +469,74 @@ begin:
        f->time_next_packet = now;
        f->credit -= qdisc_pkt_len(skb);
 
-       if (f->credit <= 0 &&
-           q->rate_enable &&
-           skb->sk && skb->sk->sk_state != TCP_TIME_WAIT) {
-               u32 rate = skb->sk->sk_pacing_rate ?: q->flow_default_rate;
+       if (f->credit > 0 || !q->rate_enable)
+               goto out;
 
-               rate = min(rate, q->flow_max_rate);
-               if (rate) {
-                       u64 len = (u64)qdisc_pkt_len(skb) * NSEC_PER_SEC;
-
-                       do_div(len, rate);
-                       /* Since socket rate can change later,
-                        * clamp the delay to 125 ms.
-                        * TODO: maybe segment the too big skb, as in commit
-                        * e43ac79a4bc ("sch_tbf: segment too big GSO packets")
-                        */
-                       if (unlikely(len > 125 * NSEC_PER_MSEC)) {
-                               len = 125 * NSEC_PER_MSEC;
-                               q->stat_pkts_too_long++;
-                       }
+       if (skb->sk && skb->sk->sk_state != TCP_TIME_WAIT) {
+               rate = skb->sk->sk_pacing_rate ?: q->flow_default_rate;
 
-                       f->time_next_packet = now + len;
+               rate = min(rate, q->flow_max_rate);
+       } else {
+               rate = q->flow_max_rate;
+               if (rate == ~0U)
+                       goto out;
+       }
+       if (rate) {
+               u32 plen = max(qdisc_pkt_len(skb), q->quantum);
+               u64 len = (u64)plen * NSEC_PER_SEC;
+
+               do_div(len, rate);
+               /* Since socket rate can change later,
+                * clamp the delay to 125 ms.
+                * TODO: maybe segment the too big skb, as in commit
+                * e43ac79a4bc ("sch_tbf: segment too big GSO packets")
+                */
+               if (unlikely(len > 125 * NSEC_PER_MSEC)) {
+                       len = 125 * NSEC_PER_MSEC;
+                       q->stat_pkts_too_long++;
                }
+
+               f->time_next_packet = now + len;
        }
 out:
-       sch->qstats.backlog -= qdisc_pkt_len(skb);
        qdisc_bstats_update(sch, skb);
-       sch->q.qlen--;
        qdisc_unthrottled(sch);
        return skb;
 }
 
 static void fq_reset(struct Qdisc *sch)
 {
+       struct fq_sched_data *q = qdisc_priv(sch);
+       struct rb_root *root;
        struct sk_buff *skb;
+       struct rb_node *p;
+       struct fq_flow *f;
+       unsigned int idx;
 
-       while ((skb = fq_dequeue(sch)) != NULL)
+       while ((skb = fq_dequeue_head(sch, &q->internal)) != NULL)
                kfree_skb(skb);
+
+       if (!q->fq_root)
+               return;
+
+       for (idx = 0; idx < (1U << q->fq_trees_log); idx++) {
+               root = &q->fq_root[idx];
+               while ((p = rb_first(root)) != NULL) {
+                       f = container_of(p, struct fq_flow, fq_node);
+                       rb_erase(p, root);
+
+                       while ((skb = fq_dequeue_head(sch, f)) != NULL)
+                               kfree_skb(skb);
+
+                       kmem_cache_free(fq_flow_cachep, f);
+               }
+       }
+       q->new_flows.first      = NULL;
+       q->old_flows.first      = NULL;
+       q->delayed              = RB_ROOT;
+       q->flows                = 0;
+       q->inactive_flows       = 0;
+       q->throttled_flows      = 0;
 }
 
 static void fq_rehash(struct fq_sched_data *q,
@@ -645,6 +679,8 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
        while (sch->q.qlen > sch->limit) {
                struct sk_buff *skb = fq_dequeue(sch);
 
+               if (!skb)
+                       break;
                kfree_skb(skb);
                drop_count++;
        }
@@ -657,21 +693,9 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
 static void fq_destroy(struct Qdisc *sch)
 {
        struct fq_sched_data *q = qdisc_priv(sch);
-       struct rb_root *root;
-       struct rb_node *p;
-       unsigned int idx;
 
-       if (q->fq_root) {
-               for (idx = 0; idx < (1U << q->fq_trees_log); idx++) {
-                       root = &q->fq_root[idx];
-                       while ((p = rb_first(root)) != NULL) {
-                               rb_erase(p, root);
-                               kmem_cache_free(fq_flow_cachep,
-                                               container_of(p, struct fq_flow, fq_node));
-                       }
-               }
-               kfree(q->fq_root);
-       }
+       fq_reset(sch);
+       kfree(q->fq_root);
        qdisc_watchdog_cancel(&q->watchdog);
 }
 
index 911b71b26b0e6670bfac1a7f8d450aea510f7333..72046b9729a8a6a669fb9c75446de8ef2fe345c6 100644 (file)
@@ -5890,7 +5890,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
                int low, high, remaining, index;
                unsigned int rover;
 
-               inet_get_local_port_range(&low, &high);
+               inet_get_local_port_range(sock_net(sk), &low, &high);
                remaining = (high - low) + 1;
                rover = net_random() % remaining + low;
 
index fcac5d14171779a32599e6b4111b75a8e94a15aa..084656671d6ee4cebc6220ad0d92bae654497882 100644 (file)
@@ -1075,6 +1075,15 @@ gss_destroy(struct rpc_auth *auth)
        kref_put(&gss_auth->kref, gss_free_callback);
 }
 
+/*
+ * Auths may be shared between rpc clients that were cloned from a
+ * common client with the same xprt, if they also share the flavor and
+ * target_name.
+ *
+ * The auth is looked up from the oldest parent sharing the same
+ * cl_xprt, and the auth itself references only that common parent
+ * (which is guaranteed to last as long as any of its descendants).
+ */
 static struct gss_auth *
 gss_auth_find_or_add_hashed(struct rpc_auth_create_args *args,
                struct rpc_clnt *clnt,
@@ -1088,6 +1097,8 @@ gss_auth_find_or_add_hashed(struct rpc_auth_create_args *args,
                        gss_auth,
                        hash,
                        hashval) {
+               if (gss_auth->client != clnt)
+                       continue;
                if (gss_auth->rpc_auth.au_flavor != args->pseudoflavor)
                        continue;
                if (gss_auth->target_name != args->target_name) {
index b9c3f9e943a9159d1617feec49c751055ea4dd55..d6e7f98fbfbffcdeb778c32a7a2d51bc987dc802 100644 (file)
@@ -468,7 +468,7 @@ expired:
        }
 
        err = __xfrm_state_delete(x);
-       if (!err && x->id.spi)
+       if (!err)
                km_state_expired(x, 1, 0);
 
        xfrm_audit_state_delete(x, err ? 0 : 1,
index 47016c304c847efffb96da5358224a9b5a93cc5e..66cad506b8a2a944f2873856ef0078445f673b8b 100755 (executable)
@@ -3975,8 +3975,8 @@ sub string_find_replace {
 # check for new externs in .h files.
                if ($realfile =~ /\.h$/ &&
                    $line =~ /^\+\s*(extern\s+)$Type\s*$Ident\s*\(/s) {
-                       if (WARN("AVOID_EXTERNS",
-                                "extern prototypes should be avoided in .h files\n" . $herecurr) &&
+                       if (CHK("AVOID_EXTERNS",
+                               "extern prototypes should be avoided in .h files\n" . $herecurr) &&
                            $fix) {
                                $fixed[$linenr - 1] =~ s/(.*)\bextern\b\s*(.*)/$1$2/;
                        }
index d6222ba4e9190207f3ddabbb58161e25f397169b..532471d0b3a0facd6c7cb70a930e4028ca6d0761 100644 (file)
  * it should be.
  */
 
-#include <linux/crypto.h>
+#include <crypto/hash.h>
 
 #include "include/apparmor.h"
 #include "include/crypto.h"
 
 static unsigned int apparmor_hash_size;
 
-static struct crypto_hash *apparmor_tfm;
+static struct crypto_shash *apparmor_tfm;
 
 unsigned int aa_hash_size(void)
 {
@@ -32,35 +32,33 @@ unsigned int aa_hash_size(void)
 int aa_calc_profile_hash(struct aa_profile *profile, u32 version, void *start,
                         size_t len)
 {
-       struct scatterlist sg[2];
-       struct hash_desc desc = {
-               .tfm = apparmor_tfm,
-               .flags = 0
-       };
+       struct {
+               struct shash_desc shash;
+               char ctx[crypto_shash_descsize(apparmor_tfm)];
+       } desc;
        int error = -ENOMEM;
        u32 le32_version = cpu_to_le32(version);
 
        if (!apparmor_tfm)
                return 0;
 
-       sg_init_table(sg, 2);
-       sg_set_buf(&sg[0], &le32_version, 4);
-       sg_set_buf(&sg[1], (u8 *) start, len);
-
        profile->hash = kzalloc(apparmor_hash_size, GFP_KERNEL);
        if (!profile->hash)
                goto fail;
 
-       error = crypto_hash_init(&desc);
+       desc.shash.tfm = apparmor_tfm;
+       desc.shash.flags = 0;
+
+       error = crypto_shash_init(&desc.shash);
        if (error)
                goto fail;
-       error = crypto_hash_update(&desc, &sg[0], 4);
+       error = crypto_shash_update(&desc.shash, (u8 *) &le32_version, 4);
        if (error)
                goto fail;
-       error = crypto_hash_update(&desc, &sg[1], len);
+       error = crypto_shash_update(&desc.shash, (u8 *) start, len);
        if (error)
                goto fail;
-       error = crypto_hash_final(&desc, profile->hash);
+       error = crypto_shash_final(&desc.shash, profile->hash);
        if (error)
                goto fail;
 
@@ -75,19 +73,19 @@ fail:
 
 static int __init init_profile_hash(void)
 {
-       struct crypto_hash *tfm;
+       struct crypto_shash *tfm;
 
        if (!apparmor_initialized)
                return 0;
 
-       tfm = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC);
+       tfm = crypto_alloc_shash("sha1", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm)) {
                int error = PTR_ERR(tfm);
                AA_ERROR("failed to setup profile sha1 hashing: %d\n", error);
                return error;
        }
        apparmor_tfm = tfm;
-       apparmor_hash_size = crypto_hash_digestsize(apparmor_tfm);
+       apparmor_hash_size = crypto_shash_digestsize(apparmor_tfm);
 
        aa_info_message("AppArmor sha1 policy hashing enabled");
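
For completeness: the shash API used above also has a one-shot entry point, crypto_shash_digest(), which combines init/update/final when the data is a single contiguous buffer. A hypothetical helper sketch reusing the apparmor_tfm set up in init_profile_hash() (not part of the patch, shown only to illustrate the API):

static int example_hash_blob(const void *buf, unsigned int len, u8 *out)
{
	struct {
		struct shash_desc shash;
		char ctx[crypto_shash_descsize(apparmor_tfm)];
	} desc;

	desc.shash.tfm = apparmor_tfm;
	desc.shash.flags = 0;

	/* init + update + final in a single call */
	return crypto_shash_digest(&desc.shash, buf, len, out);
}
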
 
index f2d4b6348cbc0a5f1ec0abdc0d2d975ad34ce493..c28b0f20ab53ed69da7c1e6b8c60a8099370a4f8 100644 (file)
@@ -360,7 +360,9 @@ static inline void aa_put_replacedby(struct aa_replacedby *p)
 static inline void __aa_update_replacedby(struct aa_profile *orig,
                                          struct aa_profile *new)
 {
-       struct aa_profile *tmp = rcu_dereference(orig->replacedby->profile);
+       struct aa_profile *tmp;
+       tmp = rcu_dereference_protected(orig->replacedby->profile,
+                                       mutex_is_locked(&orig->ns->lock));
        rcu_assign_pointer(orig->replacedby->profile, aa_get_profile(new));
        orig->flags |= PFLAG_INVALID;
        aa_put_profile(tmp);
index 6172509fa2b7441fbda60d1cd6e5b35fce432dc8..345bec07a27d9292dbfc1306d1f5e12563eaa891 100644 (file)
@@ -563,7 +563,8 @@ void __init aa_free_root_ns(void)
 static void free_replacedby(struct aa_replacedby *r)
 {
        if (r) {
-               aa_put_profile(rcu_dereference(r->profile));
+               /* r->profile will not be updated any more as r is dead */
+               aa_put_profile(rcu_dereference_protected(r->profile, true));
                kzfree(r);
        }
 }
index a5091ec06aa62816798510e40a1bcf005d2abd3d..568c7699abf1b6c8bdf20bfed594f7ab271a7ea9 100644 (file)
@@ -3929,7 +3929,7 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in
                if (snum) {
                        int low, high;
 
-                       inet_get_local_port_range(&low, &high);
+                       inet_get_local_port_range(sock_net(sk), &low, &high);
 
                        if (snum < max(PROT_SOCK, low) || snum > high) {
                                err = sel_netport_sid(sk->sk_protocol,
index 98969541cbcc9c62ff81afb9818d2399dd6e7eb0..bea523a5d852e11f9d06e1d682e065b699f347c8 100644 (file)
@@ -139,6 +139,18 @@ static int snd_compr_open(struct inode *inode, struct file *f)
 static int snd_compr_free(struct inode *inode, struct file *f)
 {
        struct snd_compr_file *data = f->private_data;
+       struct snd_compr_runtime *runtime = data->stream.runtime;
+
+       switch (runtime->state) {
+       case SNDRV_PCM_STATE_RUNNING:
+       case SNDRV_PCM_STATE_DRAINING:
+       case SNDRV_PCM_STATE_PAUSED:
+               data->stream.ops->trigger(&data->stream, SNDRV_PCM_TRIGGER_STOP);
+               break;
+       default:
+               break;
+       }
+
        data->stream.ops->free(&data->stream);
        kfree(data->stream.runtime->buffer);
        kfree(data->stream.runtime);
@@ -837,7 +849,8 @@ static int snd_compress_dev_disconnect(struct snd_device *device)
        struct snd_compr *compr;
 
        compr = device->device_data;
-       snd_unregister_device(compr->direction, compr->card, compr->device);
+       snd_unregister_device(SNDRV_DEVICE_TYPE_COMPRESS, compr->card,
+               compr->device);
        return 0;
 }
 
index b524f89a1f13a7d74c51c8231684f971662ccc15..18d9725015855c0ce3f33d174a3dfcd55836efeb 100644 (file)
@@ -111,6 +111,9 @@ enum {
 /* 0x0009 - 0x0014 -> 12 test regs */
 /* 0x0015 - visibility reg */
 
+/* Cirrus Logic CS4208 */
+#define CS4208_VENDOR_NID      0x24
+
 /*
  * Cirrus Logic CS4210
  *
@@ -223,6 +226,16 @@ static const struct hda_verb cs_coef_init_verbs[] = {
        {} /* terminator */
 };
 
+static const struct hda_verb cs4208_coef_init_verbs[] = {
+       {0x01, AC_VERB_SET_POWER_STATE, 0x00}, /* AFG: D0 */
+       {0x24, AC_VERB_SET_PROC_STATE, 0x01},  /* VPW: processing on */
+       {0x24, AC_VERB_SET_COEF_INDEX, 0x0033},
+       {0x24, AC_VERB_SET_PROC_COEF, 0x0001}, /* A1 ICS */
+       {0x24, AC_VERB_SET_COEF_INDEX, 0x0034},
+       {0x24, AC_VERB_SET_PROC_COEF, 0x1C01}, /* A1 Enable, A Thresh = 300mV */
+       {} /* terminator */
+};
+
 /* Errata: CS4207 rev C0/C1/C2 Silicon
  *
  * http://www.cirrus.com/en/pubs/errata/ER880C3.pdf
@@ -295,6 +308,8 @@ static int cs_init(struct hda_codec *codec)
                /* init_verb sequence for C0/C1/C2 errata*/
                snd_hda_sequence_write(codec, cs_errata_init_verbs);
                snd_hda_sequence_write(codec, cs_coef_init_verbs);
+       } else if (spec->vendor_nid == CS4208_VENDOR_NID) {
+               snd_hda_sequence_write(codec, cs4208_coef_init_verbs);
        }
 
        snd_hda_gen_init(codec);
@@ -434,6 +449,29 @@ static const struct hda_pintbl mba42_pincfgs[] = {
        {} /* terminator */
 };
 
+static const struct hda_pintbl mba6_pincfgs[] = {
+       { 0x10, 0x032120f0 }, /* HP */
+       { 0x11, 0x500000f0 },
+       { 0x12, 0x90100010 }, /* Speaker */
+       { 0x13, 0x500000f0 },
+       { 0x14, 0x500000f0 },
+       { 0x15, 0x770000f0 },
+       { 0x16, 0x770000f0 },
+       { 0x17, 0x430000f0 },
+       { 0x18, 0x43ab9030 }, /* Mic */
+       { 0x19, 0x770000f0 },
+       { 0x1a, 0x770000f0 },
+       { 0x1b, 0x770000f0 },
+       { 0x1c, 0x90a00090 },
+       { 0x1d, 0x500000f0 },
+       { 0x1e, 0x500000f0 },
+       { 0x1f, 0x500000f0 },
+       { 0x20, 0x500000f0 },
+       { 0x21, 0x430000f0 },
+       { 0x22, 0x430000f0 },
+       {} /* terminator */
+};
+
 static void cs420x_fixup_gpio_13(struct hda_codec *codec,
                                 const struct hda_fixup *fix, int action)
 {
@@ -556,22 +594,23 @@ static int patch_cs420x(struct hda_codec *codec)
 
 /*
  * CS4208 support:
- * Its layout is no longer compatible with CS4206/CS4207, and the generic
- * parser seems working fairly well, except for trivial fixups.
+ * Its layout is no longer compatible with CS4206/CS4207
  */
 enum {
+       CS4208_MBA6,
        CS4208_GPIO0,
 };
 
 static const struct hda_model_fixup cs4208_models[] = {
        { .id = CS4208_GPIO0, .name = "gpio0" },
+       { .id = CS4208_MBA6, .name = "mba6" },
        {}
 };
 
 static const struct snd_pci_quirk cs4208_fixup_tbl[] = {
        /* codec SSID */
-       SND_PCI_QUIRK(0x106b, 0x7100, "MacBookPro 6,1", CS4208_GPIO0),
-       SND_PCI_QUIRK(0x106b, 0x7200, "MacBookPro 6,2", CS4208_GPIO0),
+       SND_PCI_QUIRK(0x106b, 0x7100, "MacBookAir 6,1", CS4208_MBA6),
+       SND_PCI_QUIRK(0x106b, 0x7200, "MacBookAir 6,2", CS4208_MBA6),
        {} /* terminator */
 };
 
@@ -588,18 +627,35 @@ static void cs4208_fixup_gpio0(struct hda_codec *codec,
 }
 
 static const struct hda_fixup cs4208_fixups[] = {
+       [CS4208_MBA6] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = mba6_pincfgs,
+               .chained = true,
+               .chain_id = CS4208_GPIO0,
+       },
        [CS4208_GPIO0] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = cs4208_fixup_gpio0,
        },
 };
 
+/* correct the 0dB offset of input pins */
+static void cs4208_fix_amp_caps(struct hda_codec *codec, hda_nid_t adc)
+{
+       unsigned int caps;
+
+       caps = query_amp_caps(codec, adc, HDA_INPUT);
+       caps &= ~(AC_AMPCAP_OFFSET);
+       caps |= 0x02;
+       snd_hda_override_amp_caps(codec, adc, HDA_INPUT, caps);
+}
+
 static int patch_cs4208(struct hda_codec *codec)
 {
        struct cs_spec *spec;
        int err;
 
-       spec = cs_alloc_spec(codec, 0); /* no specific w/a */
+       spec = cs_alloc_spec(codec, CS4208_VENDOR_NID);
        if (!spec)
                return -ENOMEM;
 
@@ -609,6 +665,12 @@ static int patch_cs4208(struct hda_codec *codec)
                           cs4208_fixups);
        snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE);
 
+       snd_hda_override_wcaps(codec, 0x18,
+                              get_wcaps(codec, 0x18) | AC_WCAP_STEREO);
+       cs4208_fix_amp_caps(codec, 0x18);
+       cs4208_fix_amp_caps(codec, 0x1b);
+       cs4208_fix_amp_caps(codec, 0x1c);
+
        err = cs_parse_auto_config(codec);
        if (err < 0)
                goto error;
index 3d8cd04455a623b888a9efccb7bdd31f92df8188..7ea0245fc6bd5fe5579d81682a2d39ce6cd06af7 100644 (file)
@@ -1149,32 +1149,43 @@ static int hdmi_choose_cvt(struct hda_codec *codec,
 }
 
 static void haswell_config_cvts(struct hda_codec *codec,
-                       int pin_id, int mux_id)
+                       hda_nid_t pin_nid, int mux_idx)
 {
        struct hdmi_spec *spec = codec->spec;
-       struct hdmi_spec_per_pin *per_pin;
-       int pin_idx, mux_idx;
-       int curr;
-       int err;
+       hda_nid_t nid, end_nid;
+       int cvt_idx, curr;
+       struct hdmi_spec_per_cvt *per_cvt;
 
-       for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
-               per_pin = get_pin(spec, pin_idx);
+       /* configure all pins, including "no physical connection" ones */
+       end_nid = codec->start_nid + codec->num_nodes;
+       for (nid = codec->start_nid; nid < end_nid; nid++) {
+               unsigned int wid_caps = get_wcaps(codec, nid);
+               unsigned int wid_type = get_wcaps_type(wid_caps);
 
-               if (pin_idx == pin_id)
+               if (wid_type != AC_WID_PIN)
                        continue;
 
-               curr = snd_hda_codec_read(codec, per_pin->pin_nid, 0,
+               if (nid == pin_nid)
+                       continue;
+
+               curr = snd_hda_codec_read(codec, nid, 0,
                                          AC_VERB_GET_CONNECT_SEL, 0);
+               if (curr != mux_idx)
+                       continue;
 
-               /* Choose another unused converter */
-               if (curr == mux_id) {
-                       err = hdmi_choose_cvt(codec, pin_idx, NULL, &mux_idx);
-                       if (err < 0)
-                               return;
-                       snd_printdd("HDMI: choose converter %d for pin %d\n", mux_idx, pin_idx);
-                       snd_hda_codec_write_cache(codec, per_pin->pin_nid, 0,
+               /* choose an unassigned converter. The converters in the
+                * connection list are in the same order as in the codec.
+                */
+               for (cvt_idx = 0; cvt_idx < spec->num_cvts; cvt_idx++) {
+                       per_cvt = get_cvt(spec, cvt_idx);
+                       if (!per_cvt->assigned) {
+                               snd_printdd("choose cvt %d for pin nid %d\n",
+                                       cvt_idx, nid);
+                               snd_hda_codec_write_cache(codec, nid, 0,
                                            AC_VERB_SET_CONNECT_SEL,
-                                           mux_idx);
+                                           cvt_idx);
+                               break;
+                       }
                }
        }
 }
@@ -1216,7 +1227,7 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
 
        /* configure unused pins to choose other converters */
        if (is_haswell(codec))
-               haswell_config_cvts(codec, pin_idx, mux_idx);
+               haswell_config_cvts(codec, per_pin->pin_nid, mux_idx);
 
        snd_hda_spdif_ctls_assign(codec, pin_idx, per_cvt->cvt_nid);
 
index bc07d369fac43a548db7f1e45273627cc4e7246c..0e303b99a47ceef3037e7b245ed5d93caf1f7779 100644 (file)
@@ -3439,6 +3439,9 @@ static void alc283_fixup_chromebook(struct hda_codec *codec,
                /* Set to manual mode */
                val = alc_read_coef_idx(codec, 0x06);
                alc_write_coef_idx(codec, 0x06, val & ~0x000c);
+               /* Enable Line1 input control by verb */
+               val = alc_read_coef_idx(codec, 0x1a);
+               alc_write_coef_idx(codec, 0x1a, val | (1 << 4));
                break;
        }
 }
@@ -3531,6 +3534,7 @@ enum {
        ALC269VB_FIXUP_ORDISSIMO_EVE2,
        ALC283_FIXUP_CHROME_BOOK,
        ALC282_FIXUP_ASUS_TX300,
+       ALC283_FIXUP_INT_MIC,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -3790,6 +3794,16 @@ static const struct hda_fixup alc269_fixups[] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc282_fixup_asus_tx300,
        },
+       [ALC283_FIXUP_INT_MIC] = {
+               .type = HDA_FIXUP_VERBS,
+               .v.verbs = (const struct hda_verb[]) {
+                       {0x20, AC_VERB_SET_COEF_INDEX, 0x1a},
+                       {0x20, AC_VERB_SET_PROC_COEF, 0x0011},
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC269_FIXUP_LIMIT_INT_MIC_BOOST
+       },
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -3874,7 +3888,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
-       SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+       SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC),
        SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
index 099e7cd022e46d47260103b226b3410beac27133..7c43479623537af4f0d4179f4cce195cbea3e9b1 100644 (file)
@@ -5,7 +5,6 @@
 #include <stdbool.h>
 #include <sys/vfs.h>
 #include <sys/mount.h>
-#include <linux/magic.h>
 #include <linux/kernel.h>
 
 #include "debugfs.h"
index 9570c2b0f83c580454e2610cb3c07a4d7443ee19..b2519e49424f4c42bb389de846d21c8337773036 100644 (file)
@@ -32,7 +32,7 @@ u64 tsc_to_perf_time(u64 cyc, struct perf_tsc_conversion *tc)
 int perf_read_tsc_conversion(const struct perf_event_mmap_page *pc,
                             struct perf_tsc_conversion *tc)
 {
-       bool cap_usr_time_zero;
+       bool cap_user_time_zero;
        u32 seq;
        int i = 0;
 
@@ -42,7 +42,7 @@ int perf_read_tsc_conversion(const struct perf_event_mmap_page *pc,
                tc->time_mult = pc->time_mult;
                tc->time_shift = pc->time_shift;
                tc->time_zero = pc->time_zero;
-               cap_usr_time_zero = pc->cap_usr_time_zero;
+               cap_user_time_zero = pc->cap_user_time_zero;
                rmb();
                if (pc->lock == seq && !(seq & 1))
                        break;
@@ -52,7 +52,7 @@ int perf_read_tsc_conversion(const struct perf_event_mmap_page *pc,
                }
        }
 
-       if (!cap_usr_time_zero)
+       if (!cap_user_time_zero)
                return -EOPNOTSUPP;
 
        return 0;
index 423875c999b21208a5a6274d0fd47a2fb359ecd8..afe377b2884f740b0c469a367ab3938d99f7df0e 100644 (file)
@@ -321,8 +321,6 @@ found:
        return perf_event__repipe(tool, event_sw, &sample_sw, machine);
 }
 
-extern volatile int session_done;
-
 static void sig_handler(int sig __maybe_unused)
 {
        session_done = 1;
index c2dff9cb1f2ce1ac150401d7b859229beb53998e..9b5f077fee5b1b65a4e51b48ef1af0727b8c134b 100644 (file)
@@ -101,7 +101,7 @@ static int setup_cpunode_map(void)
 
        dir1 = opendir(PATH_SYS_NODE);
        if (!dir1)
-               return -1;
+               return 0;
 
        while ((dent1 = readdir(dir1)) != NULL) {
                if (dent1->d_type != DT_DIR ||
index 8e50d8d77419c7ca3e8ce72209b113d72665c09e..72eae7498c09419c8f1f77a7a005c6dcbbb5eb4b 100644 (file)
@@ -401,8 +401,6 @@ static int perf_report__setup_sample_type(struct perf_report *rep)
        return 0;
 }
 
-extern volatile int session_done;
-
 static void sig_handler(int sig __maybe_unused)
 {
        session_done = 1;
@@ -568,6 +566,9 @@ static int __cmd_report(struct perf_report *rep)
                }
        }
 
+       if (session_done())
+               return 0;
+
        if (nr_samples == 0) {
                ui__error("The %s file has no samples!\n", session->filename);
                return 0;
index 7f31a3ded1b6dc59730a0162d5af279e038d1afc..9c333ff3dfeb3de716eac13d48d7364e998bb50d 100644 (file)
@@ -553,8 +553,6 @@ static struct perf_tool perf_script = {
        .ordering_requires_timestamps = true,
 };
 
-extern volatile int session_done;
-
 static void sig_handler(int sig __maybe_unused)
 {
        session_done = 1;
index f5aa6375e3e9add4eea3cdd81de685fa37d87c4d..71aa3e35406bd064e87044d2ae71597a5f117092 100644 (file)
 #include <sys/mman.h>
 #include <linux/futex.h>
 
+/* For older distros: */
+#ifndef MAP_STACK
+# define MAP_STACK             0x20000
+#endif
+
+#ifndef MADV_HWPOISON
+# define MADV_HWPOISON         100
+#endif
+
+#ifndef MADV_MERGEABLE
+# define MADV_MERGEABLE                12
+#endif
+
+#ifndef MADV_UNMERGEABLE
+# define MADV_UNMERGEABLE      13
+#endif
+
 static size_t syscall_arg__scnprintf_hex(char *bf, size_t size,
                                         unsigned long arg,
                                         u8 arg_idx __maybe_unused,
@@ -1038,6 +1055,7 @@ static int trace__replay(struct trace *trace)
 
        trace->tool.sample        = trace__process_sample;
        trace->tool.mmap          = perf_event__process_mmap;
+       trace->tool.mmap2         = perf_event__process_mmap2;
        trace->tool.comm          = perf_event__process_comm;
        trace->tool.exit          = perf_event__process_exit;
        trace->tool.fork          = perf_event__process_fork;
index 214e17e97e5c7ba5aa25545b465b8994ac666afc..5f6f9b3271bb0657b77206f6723fd8b3786041bd 100644 (file)
@@ -87,7 +87,7 @@ CFLAGS += -Wall
 CFLAGS += -Wextra
 CFLAGS += -std=gnu99
 
-EXTLIBS = -lelf -lpthread -lrt -lm
+EXTLIBS = -lelf -lpthread -lrt -lm -ldl
 
 ifeq ($(call try-cc,$(SOURCE_HELLO),$(CFLAGS) -Werror -fstack-protector-all,-fstack-protector-all),y)
   CFLAGS += -fstack-protector-all
@@ -180,6 +180,9 @@ FLAGS_LIBELF=$(CFLAGS) $(LDFLAGS) $(EXTLIBS)
 ifeq ($(call try-cc,$(SOURCE_ELF_MMAP),$(FLAGS_LIBELF),-DLIBELF_MMAP),y)
   CFLAGS += -DLIBELF_MMAP
 endif
+ifeq ($(call try-cc,$(SOURCE_ELF_GETPHDRNUM),$(FLAGS_LIBELF),-DHAVE_ELF_GETPHDRNUM),y)
+  CFLAGS += -DHAVE_ELF_GETPHDRNUM
+endif
 
 # include ARCH specific config
 -include $(src-perf)/arch/$(ARCH)/Makefile
index 708fb8e9822a3ed43bd192470c5da6f01c236c38..d5a8dd44945fcc6483eabfafde3b5d882b46f6f1 100644 (file)
@@ -61,6 +61,15 @@ int main(void)
 }
 endef
 
+define SOURCE_ELF_GETPHDRNUM
+#include <libelf.h>
+int main(void)
+{
+       size_t dst;
+       return elf_getphdrnum(0, &dst);
+}
+endef
+
 ifndef NO_SLANG
 define SOURCE_SLANG
 #include <slang.h>
@@ -210,6 +219,7 @@ define SOURCE_LIBAUDIT
 
 int main(void)
 {
+       printf(\"error message: %s\n\", audit_errno_to_name(0));
        return audit_open();
 }
 endef
index bfc5a27597d60e1f09b7f95ef691e37608f8c694..7eae5488ecea47344cac10677104cb9e6cb2cc44 100644 (file)
@@ -809,7 +809,7 @@ static int symbol__parse_objdump_line(struct symbol *sym, struct map *map,
                    end = map__rip_2objdump(map, sym->end);
 
                offset = line_ip - start;
-               if (offset < 0 || (u64)line_ip > end)
+               if ((u64)line_ip < start || (u64)line_ip > end)
                        offset = -1;
                else
                        parsed_line = tmp2 + 1;
index 3e5f5430a28aa929741e2bd3ccbcf554ae62a074..e23bde19d590872c6567c35d0df39fd5d0d68a87 100644 (file)
@@ -262,6 +262,21 @@ bool die_is_signed_type(Dwarf_Die *tp_die)
                ret == DW_ATE_signed_fixed);
 }
 
+/**
+ * die_is_func_def - Ensure that this DIE is a subprogram and definition
+ * @dw_die: a DIE
+ *
+ * Ensure that this DIE is a subprogram and NOT a declaration. This
+ * returns true if @dw_die is a function definition.
+ **/
+bool die_is_func_def(Dwarf_Die *dw_die)
+{
+       Dwarf_Attribute attr;
+
+       return (dwarf_tag(dw_die) == DW_TAG_subprogram &&
+               dwarf_attr(dw_die, DW_AT_declaration, &attr) == NULL);
+}
+
 /**
  * die_get_data_member_location - Get the data-member offset
  * @mb_die: a DIE of a member of a data structure
@@ -392,6 +407,10 @@ static int __die_search_func_cb(Dwarf_Die *fn_die, void *data)
 {
        struct __addr_die_search_param *ad = data;
 
+       /*
+        * Since a declaration entry doesn't have a given pc, this always
+        * returns the function definition entry.
+        */
        if (dwarf_tag(fn_die) == DW_TAG_subprogram &&
            dwarf_haspc(fn_die, ad->addr)) {
                memcpy(ad->die_mem, fn_die, sizeof(Dwarf_Die));
index 6ce1717784b7ab42a14d58431c3026df749f0620..8658d41697d27fbbe06dbd1fbdbc3b9d895b4606 100644 (file)
@@ -38,6 +38,9 @@ extern int cu_find_lineinfo(Dwarf_Die *cudie, unsigned long addr,
 extern int cu_walk_functions_at(Dwarf_Die *cu_die, Dwarf_Addr addr,
                        int (*callback)(Dwarf_Die *, void *), void *data);
 
+/* Ensure that this DIE is a subprogram and definition (not declaration) */
+extern bool die_is_func_def(Dwarf_Die *dw_die);
+
 /* Compare diename and tname */
 extern bool die_compare_name(Dwarf_Die *dw_die, const char *tname);
 
index 26441d0e571bfce2bcd469dfdfe428175facb5ee..ce69901176d864506d39cc0df4f5dbe9479f9383 100644 (file)
@@ -199,9 +199,11 @@ static int write_buildid(char *name, size_t name_len, u8 *build_id,
        return write_padded(fd, name, name_len + 1, len);
 }
 
-static int __dsos__write_buildid_table(struct list_head *head, pid_t pid,
-                               u16 misc, int fd)
+static int __dsos__write_buildid_table(struct list_head *head,
+                                      struct machine *machine,
+                                      pid_t pid, u16 misc, int fd)
 {
+       char nm[PATH_MAX];
        struct dso *pos;
 
        dsos__for_each_with_build_id(pos, head) {
@@ -215,6 +217,10 @@ static int __dsos__write_buildid_table(struct list_head *head, pid_t pid,
                if (is_vdso_map(pos->short_name)) {
                        name = (char *) VDSO__MAP_NAME;
                        name_len = sizeof(VDSO__MAP_NAME) + 1;
+               } else if (dso__is_kcore(pos)) {
+                       machine__mmap_name(machine, nm, sizeof(nm));
+                       name = nm;
+                       name_len = strlen(nm) + 1;
                } else {
                        name = pos->long_name;
                        name_len = pos->long_name_len + 1;
@@ -240,10 +246,10 @@ static int machine__write_buildid_table(struct machine *machine, int fd)
                umisc = PERF_RECORD_MISC_GUEST_USER;
        }
 
-       err = __dsos__write_buildid_table(&machine->kernel_dsos, machine->pid,
-                                         kmisc, fd);
+       err = __dsos__write_buildid_table(&machine->kernel_dsos, machine,
+                                         machine->pid, kmisc, fd);
        if (err == 0)
-               err = __dsos__write_buildid_table(&machine->user_dsos,
+               err = __dsos__write_buildid_table(&machine->user_dsos, machine,
                                                  machine->pid, umisc, fd);
        return err;
 }
@@ -375,23 +381,31 @@ out_free:
        return err;
 }
 
-static int dso__cache_build_id(struct dso *dso, const char *debugdir)
+static int dso__cache_build_id(struct dso *dso, struct machine *machine,
+                              const char *debugdir)
 {
        bool is_kallsyms = dso->kernel && dso->long_name[0] != '/';
        bool is_vdso = is_vdso_map(dso->short_name);
+       char *name = dso->long_name;
+       char nm[PATH_MAX];
 
-       return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id),
-                                    dso->long_name, debugdir,
-                                    is_kallsyms, is_vdso);
+       if (dso__is_kcore(dso)) {
+               is_kallsyms = true;
+               machine__mmap_name(machine, nm, sizeof(nm));
+               name = nm;
+       }
+       return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id), name,
+                                    debugdir, is_kallsyms, is_vdso);
 }
 
-static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir)
+static int __dsos__cache_build_ids(struct list_head *head,
+                                  struct machine *machine, const char *debugdir)
 {
        struct dso *pos;
        int err = 0;
 
        dsos__for_each_with_build_id(pos, head)
-               if (dso__cache_build_id(pos, debugdir))
+               if (dso__cache_build_id(pos, machine, debugdir))
                        err = -1;
 
        return err;
@@ -399,8 +413,9 @@ static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir)
 
 static int machine__cache_build_ids(struct machine *machine, const char *debugdir)
 {
-       int ret = __dsos__cache_build_ids(&machine->kernel_dsos, debugdir);
-       ret |= __dsos__cache_build_ids(&machine->user_dsos, debugdir);
+       int ret = __dsos__cache_build_ids(&machine->kernel_dsos, machine,
+                                         debugdir);
+       ret |= __dsos__cache_build_ids(&machine->user_dsos, machine, debugdir);
        return ret;
 }
 
index 46a0d35a05e1f21aae097e7a67cea89bfe9ecddf..9ff6cf3e9a99f69596b37372f60203c11bec88a3 100644 (file)
@@ -611,6 +611,8 @@ void hists__collapse_resort(struct hists *hists)
        next = rb_first(root);
 
        while (next) {
+               if (session_done())
+                       break;
                n = rb_entry(next, struct hist_entry, rb_node_in);
                next = rb_next(&n->rb_node_in);
 
index 933d14f287ca92645152f7714651a10b818e87bc..6188d2876a7128aaa68e426c3334dfcae099dc41 100644 (file)
@@ -792,7 +792,7 @@ static int machine__create_modules(struct machine *machine)
                modules = path;
        }
 
-       if (symbol__restricted_filename(path, "/proc/modules"))
+       if (symbol__restricted_filename(modules, "/proc/modules"))
                return -1;
 
        file = fopen(modules, "r");
index be0329394d5639f77644d8a9079dd6ceb4f27a73..371476cb8ddc17a6643a6298ba887f8c6aa75bcb 100644 (file)
@@ -118,7 +118,6 @@ static const Dwfl_Callbacks offline_callbacks = {
 static int debuginfo__init_offline_dwarf(struct debuginfo *self,
                                         const char *path)
 {
-       Dwfl_Module *mod;
        int fd;
 
        fd = open(path, O_RDONLY);
@@ -129,11 +128,11 @@ static int debuginfo__init_offline_dwarf(struct debuginfo *self,
        if (!self->dwfl)
                goto error;
 
-       mod = dwfl_report_offline(self->dwfl, "", "", fd);
-       if (!mod)
+       self->mod = dwfl_report_offline(self->dwfl, "", "", fd);
+       if (!self->mod)
                goto error;
 
-       self->dbg = dwfl_module_getdwarf(mod, &self->bias);
+       self->dbg = dwfl_module_getdwarf(self->mod, &self->bias);
        if (!self->dbg)
                goto error;
 
@@ -676,37 +675,42 @@ static int find_variable(Dwarf_Die *sc_die, struct probe_finder *pf)
 }
 
 /* Convert subprogram DIE to trace point */
-static int convert_to_trace_point(Dwarf_Die *sp_die, Dwarf_Addr paddr,
-                                 bool retprobe, struct probe_trace_point *tp)
+static int convert_to_trace_point(Dwarf_Die *sp_die, Dwfl_Module *mod,
+                                 Dwarf_Addr paddr, bool retprobe,
+                                 struct probe_trace_point *tp)
 {
        Dwarf_Addr eaddr, highaddr;
-       const char *name;
-
-       /* Copy the name of probe point */
-       name = dwarf_diename(sp_die);
-       if (name) {
-               if (dwarf_entrypc(sp_die, &eaddr) != 0) {
-                       pr_warning("Failed to get entry address of %s\n",
-                                  dwarf_diename(sp_die));
-                       return -ENOENT;
-               }
-               if (dwarf_highpc(sp_die, &highaddr) != 0) {
-                       pr_warning("Failed to get end address of %s\n",
-                                  dwarf_diename(sp_die));
-                       return -ENOENT;
-               }
-               if (paddr > highaddr) {
-                       pr_warning("Offset specified is greater than size of %s\n",
-                                  dwarf_diename(sp_die));
-                       return -EINVAL;
-               }
-               tp->symbol = strdup(name);
-               if (tp->symbol == NULL)
-                       return -ENOMEM;
-               tp->offset = (unsigned long)(paddr - eaddr);
-       } else
-               /* This function has no name. */
-               tp->offset = (unsigned long)paddr;
+       GElf_Sym sym;
+       const char *symbol;
+
+       /* Verify the address is correct */
+       if (dwarf_entrypc(sp_die, &eaddr) != 0) {
+               pr_warning("Failed to get entry address of %s\n",
+                          dwarf_diename(sp_die));
+               return -ENOENT;
+       }
+       if (dwarf_highpc(sp_die, &highaddr) != 0) {
+               pr_warning("Failed to get end address of %s\n",
+                          dwarf_diename(sp_die));
+               return -ENOENT;
+       }
+       if (paddr > highaddr) {
+               pr_warning("Offset specified is greater than size of %s\n",
+                          dwarf_diename(sp_die));
+               return -EINVAL;
+       }
+
+       /* Get an appropriate symbol from symtab */
+       symbol = dwfl_module_addrsym(mod, paddr, &sym, NULL);
+       if (!symbol) {
+               pr_warning("Failed to find symbol at 0x%lx\n",
+                          (unsigned long)paddr);
+               return -ENOENT;
+       }
+       tp->offset = (unsigned long)(paddr - sym.st_value);
+       tp->symbol = strdup(symbol);
+       if (!tp->symbol)
+               return -ENOMEM;
 
        /* Return probe must be on the head of a subprogram */
        if (retprobe) {
@@ -734,7 +738,7 @@ static int call_probe_finder(Dwarf_Die *sc_die, struct probe_finder *pf)
        }
 
        /* If not a real subprogram, find a real one */
-       if (dwarf_tag(sc_die) != DW_TAG_subprogram) {
+       if (!die_is_func_def(sc_die)) {
                if (!die_find_realfunc(&pf->cu_die, pf->addr, &pf->sp_die)) {
                        pr_warning("Failed to find probe point in any "
                                   "functions.\n");
@@ -980,12 +984,10 @@ static int probe_point_search_cb(Dwarf_Die *sp_die, void *data)
        struct dwarf_callback_param *param = data;
        struct probe_finder *pf = param->data;
        struct perf_probe_point *pp = &pf->pev->point;
-       Dwarf_Attribute attr;
 
        /* Check tag and diename */
-       if (dwarf_tag(sp_die) != DW_TAG_subprogram ||
-           !die_compare_name(sp_die, pp->function) ||
-           dwarf_attr(sp_die, DW_AT_declaration, &attr))
+       if (!die_is_func_def(sp_die) ||
+           !die_compare_name(sp_die, pp->function))
                return DWARF_CB_OK;
 
        /* Check declared file */
@@ -1151,7 +1153,7 @@ static int add_probe_trace_event(Dwarf_Die *sc_die, struct probe_finder *pf)
        tev = &tf->tevs[tf->ntevs++];
 
        /* Trace point should be converted from subprogram DIE */
-       ret = convert_to_trace_point(&pf->sp_die, pf->addr,
+       ret = convert_to_trace_point(&pf->sp_die, tf->mod, pf->addr,
                                     pf->pev->point.retprobe, &tev->point);
        if (ret < 0)
                return ret;
@@ -1183,7 +1185,7 @@ int debuginfo__find_trace_events(struct debuginfo *self,
 {
        struct trace_event_finder tf = {
                        .pf = {.pev = pev, .callback = add_probe_trace_event},
-                       .max_tevs = max_tevs};
+                       .mod = self->mod, .max_tevs = max_tevs};
        int ret;
 
        /* Allocate result tevs array */
@@ -1252,7 +1254,7 @@ static int add_available_vars(Dwarf_Die *sc_die, struct probe_finder *pf)
        vl = &af->vls[af->nvls++];
 
        /* Trace point should be converted from subprogram DIE */
-       ret = convert_to_trace_point(&pf->sp_die, pf->addr,
+       ret = convert_to_trace_point(&pf->sp_die, af->mod, pf->addr,
                                     pf->pev->point.retprobe, &vl->point);
        if (ret < 0)
                return ret;
@@ -1291,6 +1293,7 @@ int debuginfo__find_available_vars_at(struct debuginfo *self,
 {
        struct available_var_finder af = {
                        .pf = {.pev = pev, .callback = add_available_vars},
+                       .mod = self->mod,
                        .max_vls = max_vls, .externs = externs};
        int ret;
 
@@ -1474,7 +1477,7 @@ static int line_range_inline_cb(Dwarf_Die *in_die, void *data)
        return 0;
 }
 
-/* Search function from function name */
+/* Search function definition from function name */
 static int line_range_search_cb(Dwarf_Die *sp_die, void *data)
 {
        struct dwarf_callback_param *param = data;
@@ -1485,7 +1488,7 @@ static int line_range_search_cb(Dwarf_Die *sp_die, void *data)
        if (lr->file && strtailcmp(lr->file, dwarf_decl_file(sp_die)))
                return DWARF_CB_OK;
 
-       if (dwarf_tag(sp_die) == DW_TAG_subprogram &&
+       if (die_is_func_def(sp_die) &&
            die_compare_name(sp_die, lr->function)) {
                lf->fname = dwarf_decl_file(sp_die);
                dwarf_decl_line(sp_die, &lr->offset);
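
Several of the search callbacks above (call_probe_finder, probe_point_search_cb, line_range_search_cb) replace the open-coded dwarf_tag() == DW_TAG_subprogram test with a die_is_func_def() helper, so that bare declarations no longer match and only real function definitions are considered. The helper itself is defined outside this hunk (presumably alongside the other die_* helpers in dwarf-aux.c); judging from the DW_AT_declaration check the old probe_point_search_cb code did inline, it boils down to something like:

  #include <stdbool.h>
  #include <dwarf.h>
  #include <elfutils/libdw.h>

  /*
   * Sketch: true only for a subprogram DIE that is an actual definition,
   * i.e. one that does not carry the DW_AT_declaration attribute.
   */
  static bool die_is_func_def(Dwarf_Die *dw_die)
  {
          Dwarf_Attribute attr;

          return dwarf_tag(dw_die) == DW_TAG_subprogram &&
                 dwarf_attr(dw_die, DW_AT_declaration, &attr) == NULL;
  }

Centralizing the test keeps the three call sites from drifting apart the next time the definition-vs-declaration rules need adjusting.
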
index 17e94d0c36f981dbf8dbc245410b9a4aa511d16c..3b7d63018960d7ff6a6808ce81eff2d34b40bd09 100644
@@ -23,6 +23,7 @@ static inline int is_c_varname(const char *name)
 /* debug information structure */
 struct debuginfo {
        Dwarf           *dbg;
+       Dwfl_Module     *mod;
        Dwfl            *dwfl;
        Dwarf_Addr      bias;
 };
@@ -77,6 +78,7 @@ struct probe_finder {
 
 struct trace_event_finder {
        struct probe_finder     pf;
+       Dwfl_Module             *mod;           /* For solving symbols */
        struct probe_trace_event *tevs;         /* Found trace events */
        int                     ntevs;          /* Number of trace events */
        int                     max_tevs;       /* Max number of trace events */
@@ -84,6 +86,7 @@ struct trace_event_finder {
 
 struct available_var_finder {
        struct probe_finder     pf;
+       Dwfl_Module             *mod;           /* For solving symbols */
        struct variable_list    *vls;           /* Found variable lists */
        int                     nvls;           /* Number of variable lists */
        int                     max_vls;        /* Max no. of variable lists */
index 51f5edf2a6d0d140dd897c58f9d606c9fb25be9d..70ffa41518f34a1bbfaa82e67c46a8a22b133f67 100644
@@ -531,6 +531,9 @@ static int flush_sample_queue(struct perf_session *s,
                return 0;
 
        list_for_each_entry_safe(iter, tmp, head, list) {
+               if (session_done())
+                       return 0;
+
                if (iter->timestamp > limit)
                        break;
 
@@ -1160,7 +1163,6 @@ static void perf_session__warn_about_errors(const struct perf_session *session,
        }
 }
 
-#define session_done() (*(volatile int *)(&session_done))
 volatile int session_done;
 
 static int __perf_session__process_pipe_events(struct perf_session *self,
@@ -1372,10 +1374,13 @@ more:
                                    "Processing events...");
        }
 
+       err = 0;
+       if (session_done())
+               goto out_err;
+
        if (file_pos < file_size)
                goto more;
 
-       err = 0;
        /* do the final flush for ordered samples */
        session->ordered_samples.next_flush = ULLONG_MAX;
        err = flush_sample_queue(session, tool);
index 3aa75fb2225f7129daa12f7e86219f30928e5e42..04bf7373a7e5fb04222b1a9cdb5c295045c0626f 100644
@@ -124,4 +124,8 @@ int __perf_session__set_tracepoints_handlers(struct perf_session *session,
 
 #define perf_session__set_tracepoints_handlers(session, array) \
        __perf_session__set_tracepoints_handlers(session, array, ARRAY_SIZE(array))
+
+extern volatile int session_done;
+
+#define session_done() (*(volatile int *)(&session_done))
 #endif /* __PERF_SESSION_H */
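
The session.c and session.h hunks above expose session_done as a plain volatile flag plus an accessor macro in the header, and check it both inside flush_sample_queue() and between chunks of the main processing loop, so a signal handler can make long-running report/script sessions stop promptly instead of grinding through the rest of the perf.data file. Reduced to a standalone sketch (the handler, loop and sleep are illustrative, not perf code):

  #include <signal.h>
  #include <stdio.h>
  #include <unistd.h>

  /* Mirrors the flag the header change above exports. */
  static volatile int session_done;

  static void sig_handler(int sig)
  {
          (void)sig;
          session_done = 1;    /* async-signal-safe: only flip the flag */
  }

  int main(void)
  {
          unsigned long chunk = 0;

          signal(SIGINT, sig_handler);

          for (;;) {
                  if (session_done)    /* bail out between chunks, not mid-event */
                          break;

                  /* ... map, parse and deliver one chunk of events here ... */
                  usleep(100 * 1000);
                  printf("processed chunk %lu\n", chunk++);
          }

          /* The real code still does a final ordered-samples flush here. */
          printf("flushing and exiting cleanly\n");
          return 0;
  }

Setting err = 0 right before the session_done() check in the main loop matters: an interrupted run is reported as a clean early exit rather than a processing error.
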
index a7b9ab55738086c24d12d7e9ee8cb6143ffd47aa..a9c829be52169eac5b9f00d9382fd9281152b9e6 100644
@@ -8,6 +8,22 @@
 #include "symbol.h"
 #include "debug.h"
 
+#ifndef HAVE_ELF_GETPHDRNUM
+static int elf_getphdrnum(Elf *elf, size_t *dst)
+{
+       GElf_Ehdr gehdr;
+       GElf_Ehdr *ehdr;
+
+       ehdr = gelf_getehdr(elf, &gehdr);
+       if (!ehdr)
+               return -1;
+
+       *dst = ehdr->e_phnum;
+
+       return 0;
+}
+#endif
+
 #ifndef NT_GNU_BUILD_ID
 #define NT_GNU_BUILD_ID 3
 #endif
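
The symbol-elf.c hunk adds a compatibility fallback for libelf versions that predate elf_getphdrnum(): when the feature test does not define HAVE_ELF_GETPHDRNUM, the program-header count is simply read from e_phnum in the ELF header. Either way, callers elsewhere in the file can iterate program headers uniformly; roughly like this (the PT_NOTE walk and the function name are illustrative, the actual caller is outside the visible hunk):

  #include <stdio.h>
  #include <gelf.h>

  /* Walk the program headers of an already-opened Elf handle; sketch only. */
  static int dump_note_segments(Elf *elf)
  {
          GElf_Phdr phdr;
          size_t i, phdrnum;

          /* Works with both the real elf_getphdrnum() and the fallback above. */
          if (elf_getphdrnum(elf, &phdrnum) != 0)
                  return -1;

          for (i = 0; i < phdrnum; i++) {
                  if (gelf_getphdr(elf, i, &phdr) == NULL)
                          return -1;
                  if (phdr.p_type == PT_NOTE)
                          printf("PT_NOTE at offset %#llx, size %llu\n",
                                 (unsigned long long)phdr.p_offset,
                                 (unsigned long long)phdr.p_filesz);
          }
          return 0;
  }

Note that the e_phnum fallback does not handle the PN_XNUM escape used by files with 65535 or more program headers; the real elf_getphdrnum() does, but that corner case is unlikely to matter for the objects perf reads.
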
index fe7a27d67d2b7af23a596683509769a4f1bd6e38..e9e1c03f927d27bce1e041a9cc61ca47dae2b987 100644
@@ -186,7 +186,7 @@ void parse_proc_kallsyms(struct pevent *pevent,
        char *next = NULL;
        char *addr_str;
        char *mod;
-       char *fmt;
+       char *fmt = NULL;
 
        line = strtok_r(file, "\n", &next);
        while (line) {