Merge branches 'acpi-video' and 'acpi-hotplug'
author Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Sun, 27 Jul 2014 21:55:54 +0000 (23:55 +0200)
committer Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Sun, 27 Jul 2014 21:55:54 +0000 (23:55 +0200)
* acpi-video:
  ACPI: move models with win8 brightness problems from win8 blacklist to use_native_backlight
  ACPI / video: Fix backlight taking 2 steps on a brightness up/down keypress

* acpi-hotplug:
  ACPI / hotplug / PCI: Fix sparse non static symbol warning
  ACPI / hotplug: Simplify acpi_set_hp_context()
  ACPI / hotplug / PCI: Eliminate acpiphp_dev_to_bridge()

457 files changed:
Documentation/acpi/enumeration.txt
Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt
Documentation/input/event-codes.txt
Documentation/kernel-parameters.txt
MAINTAINERS
Makefile
arch/arm/Kconfig
arch/arm/boot/dts/at91sam9n12.dtsi
arch/arm/boot/dts/at91sam9x5.dtsi
arch/arm/kernel/topology.c
arch/arm/mach-exynos/hotplug.c
arch/arm/mach-exynos/platsmp.c
arch/arm/mach-imx/clk-imx6q.c
arch/arm/mach-mvebu/coherency.c
arch/arm/mach-mvebu/headsmp-a9.S
arch/arm/mach-mvebu/pmsu.c
arch/arm64/Kconfig
arch/arm64/kernel/efi-stub.c
arch/arm64/mm/init.c
arch/blackfin/configs/BF609-EZKIT_defconfig
arch/blackfin/kernel/vmlinux.lds.S
arch/blackfin/mach-bf533/boards/blackstamp.c
arch/blackfin/mach-bf537/boards/cm_bf537e.c
arch/blackfin/mach-bf537/boards/cm_bf537u.c
arch/blackfin/mach-bf537/boards/tcm_bf537.c
arch/blackfin/mach-bf548/boards/ezkit.c
arch/blackfin/mach-bf561/boards/acvilon.c
arch/blackfin/mach-bf561/boards/cm_bf561.c
arch/blackfin/mach-bf561/boards/ezkit.c
arch/blackfin/mach-bf609/boards/ezkit.c
arch/blackfin/mach-bf609/include/mach/pm.h
arch/blackfin/mach-bf609/pm.c
arch/blackfin/mach-common/ints-priority.c
arch/ia64/Kconfig
arch/ia64/include/asm/acenv.h
arch/ia64/include/asm/acpi.h
arch/parisc/include/uapi/asm/signal.h
arch/parisc/mm/init.c
arch/powerpc/Kconfig
arch/powerpc/include/asm/cputable.h
arch/powerpc/include/asm/kvm_book3s_64.h
arch/powerpc/include/asm/mmu-hash64.h
arch/powerpc/include/asm/ppc_asm.h
arch/powerpc/kernel/cputable.c
arch/powerpc/kernel/smp.c
arch/powerpc/kvm/book3s_64_mmu_hv.c
arch/powerpc/kvm/book3s_hv_rm_mmu.c
arch/powerpc/kvm/book3s_hv_rmhandlers.S
arch/powerpc/kvm/book3s_interrupts.S
arch/powerpc/kvm/book3s_rmhandlers.S
arch/powerpc/kvm/book3s_rtas.c
arch/powerpc/kvm/e500_mmu_host.c
arch/powerpc/lib/mem_64.S
arch/powerpc/lib/sstep.c
arch/powerpc/net/bpf_jit_comp.c
arch/powerpc/platforms/pseries/dlpar.c
arch/powerpc/platforms/pseries/reconfig.c
arch/s390/include/asm/switch_to.h
arch/s390/kernel/head.S
arch/s390/kernel/ptrace.c
arch/s390/pci/pci.c
arch/sh/Makefile
arch/sparc/Kconfig
arch/sparc/include/uapi/asm/unistd.h
arch/sparc/kernel/sys32.S
arch/sparc/kernel/systbls_32.S
arch/sparc/kernel/systbls_64.S
arch/um/kernel/tlb.c
arch/um/kernel/trap.c
arch/um/os-Linux/skas/process.c
arch/x86/Kconfig
arch/x86/boot/header.S
arch/x86/boot/tools/build.c
arch/x86/include/asm/acenv.h
arch/x86/include/asm/acpi.h
arch/x86/kernel/apm_32.c
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/cpu/intel_cacheinfo.c
arch/x86/kernel/cpu/mcheck/mce.c
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/cpu/perf_event.h
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/cpu/perf_event_intel_ds.c
arch/x86/kernel/cpu/perf_event_intel_uncore.c
arch/x86/kernel/entry_32.S
arch/x86/kernel/espfix_64.c
arch/x86/kernel/kprobes/core.c
arch/x86/kernel/tsc.c
arch/x86/kvm/x86.c
arch/xtensa/kernel/vectors.S
arch/xtensa/kernel/vmlinux.lds.S
arch/xtensa/mm/init.c
block/blk-cgroup.c
block/blk-tag.c
block/compat_ioctl.c
drivers/acpi/Kconfig
drivers/acpi/Makefile
drivers/acpi/acpi_extlog.c
drivers/acpi/acpi_processor.c
drivers/acpi/acpica/Makefile
drivers/acpi/acpica/acapps.h
drivers/acpi/acpica/acdebug.h
drivers/acpi/acpica/acglobal.h
drivers/acpi/acpica/acutils.h
drivers/acpi/acpica/evgpe.c
drivers/acpi/acpica/evxfgpe.c
drivers/acpi/acpica/exfield.c
drivers/acpi/acpica/hwregs.c
drivers/acpi/acpica/nsobject.c
drivers/acpi/acpica/utbuffer.c
drivers/acpi/acpica/utcopy.c
drivers/acpi/acpica/utdebug.c
drivers/acpi/acpica/utfileio.c [new file with mode: 0644]
drivers/acpi/acpica/utglobal.c
drivers/acpi/acpica/utinit.c
drivers/acpi/acpica/utprint.c [new file with mode: 0644]
drivers/acpi/apei/apei-internal.h
drivers/acpi/apei/ghes.c
drivers/acpi/blacklist.c
drivers/acpi/bus.c
drivers/acpi/button.c
drivers/acpi/device_pm.c
drivers/acpi/internal.h
drivers/acpi/osl.c
drivers/acpi/pci_root.c
drivers/acpi/processor_core.c
drivers/acpi/processor_pdc.c [new file with mode: 0644]
drivers/acpi/scan.c
drivers/acpi/sleep.c
drivers/acpi/video.c
drivers/ata/ahci.c
drivers/ata/libata-core.c
drivers/ata/libata-eh.c
drivers/ata/pata_ep93xx.c
drivers/base/platform.c
drivers/block/drbd/drbd_nl.c
drivers/block/zram/zram_drv.c
drivers/bluetooth/ath3k.c
drivers/bluetooth/btusb.c
drivers/bluetooth/hci_h5.c
drivers/char/hw_random/core.c
drivers/char/hw_random/virtio-rng.c
drivers/char/random.c
drivers/cpufreq/Kconfig.arm
drivers/cpufreq/cpufreq-cpu0.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/sa1110-cpufreq.c
drivers/firewire/Kconfig
drivers/firewire/ohci.c
drivers/firmware/efi/cper.c
drivers/firmware/efi/efi.c
drivers/firmware/efi/fdt.c
drivers/gpio/gpio-mcp23s08.c
drivers/gpio/gpio-rcar.c
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_render_state.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_panel.c
drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
drivers/gpu/drm/qxl/qxl_irq.c
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/atombios_encoders.c
drivers/gpu/drm/radeon/cik.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/evergreen_reg.h
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_cs.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/radeon/radeon_vm.c
drivers/gpu/drm/radeon/rv515.c
drivers/gpu/drm/radeon/si.c
drivers/gpu/drm/radeon/trinity_dpm.c
drivers/hv/hv_fcopy.c
drivers/hwmon/adt7470.c
drivers/hwmon/da9052-hwmon.c
drivers/hwmon/da9055-hwmon.c
drivers/hwmon/smsc47m192.c
drivers/ide/Kconfig
drivers/ide/ide-probe.c
drivers/iio/accel/mma8452.c
drivers/iio/industrialio-event.c
drivers/infiniband/hw/cxgb4/cm.c
drivers/infiniband/hw/cxgb4/device.c
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
drivers/infiniband/hw/mlx5/qp.c
drivers/input/input.c
drivers/input/keyboard/st-keyscan.c
drivers/input/misc/sirfsoc-onkey.c
drivers/input/mouse/synaptics.c
drivers/input/serio/i8042-x86ia64io.h
drivers/input/tablet/wacom_wac.c
drivers/input/touchscreen/ti_am335x_tsc.c
drivers/iommu/fsl_pamu.c
drivers/iommu/fsl_pamu_domain.c
drivers/irqchip/irq-gic.c
drivers/isdn/hisax/l3ni1.c
drivers/isdn/i4l/isdn_ppp.c
drivers/md/dm-cache-metadata.c
drivers/md/dm-thin-metadata.c
drivers/media/dvb-frontends/si2168.c
drivers/media/dvb-frontends/si2168_priv.h
drivers/media/dvb-frontends/tda10071.c
drivers/media/dvb-frontends/tda10071_priv.h
drivers/media/pci/saa7134/saa7134-empress.c
drivers/media/platform/davinci/vpif_capture.c
drivers/media/platform/davinci/vpif_display.c
drivers/media/tuners/si2157.c
drivers/media/usb/dvb-usb-v2/af9035.c
drivers/media/usb/gspca/pac7302.c
drivers/media/usb/hdpvr/hdpvr-video.c
drivers/media/v4l2-core/v4l2-dv-timings.c
drivers/mtd/chips/cfi_cmdset_0001.c
drivers/mtd/devices/elm.c
drivers/mtd/nand/nand_base.c
drivers/mtd/ubi/fastmap.c
drivers/net/bonding/bond_main.c
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/broadcom/genet/bcmgenet.h
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/freescale/ucc_geth.c
drivers/net/ethernet/intel/igb/e1000_82575.c
drivers/net/ethernet/intel/igb/e1000_defines.h
drivers/net/ethernet/intel/igb/e1000_hw.h
drivers/net/ethernet/intel/igb/e1000_i210.c
drivers/net/ethernet/intel/igb/e1000_i210.h
drivers/net/ethernet/intel/igb/e1000_regs.h
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/mellanox/mlx4/cq.c
drivers/net/ethernet/mellanox/mlx4/en_cq.c
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/en_rx.c
drivers/net/ethernet/mellanox/mlx4/en_tx.c
drivers/net/ethernet/mellanox/mlx4/eq.c
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/mellanox/mlx5/core/mr.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
drivers/net/ethernet/stmicro/stmmac/enh_desc.c
drivers/net/ethernet/sun/sunvnet.c
drivers/net/fddi/defxx.c
drivers/net/phy/dp83640.c
drivers/net/phy/mdio_bus.c
drivers/net/ppp/ppp_generic.c
drivers/net/ppp/pppoe.c
drivers/net/usb/hso.c
drivers/net/usb/huawei_cdc_ncm.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/r8152.c
drivers/net/usb/smsc95xx.c
drivers/net/wan/farsync.c
drivers/net/wan/x25_asy.c
drivers/net/wireless/ath/ath10k/core.c
drivers/net/wireless/ath/ath10k/htt_rx.c
drivers/net/wireless/brcm80211/brcmfmac/usb.c
drivers/net/wireless/iwlwifi/dvm/rxon.c
drivers/net/wireless/iwlwifi/iwl-fw.h
drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
drivers/net/wireless/iwlwifi/mvm/mac80211.c
drivers/net/wireless/iwlwifi/mvm/scan.c
drivers/net/wireless/iwlwifi/pcie/drv.c
drivers/net/wireless/mwifiex/11n_aggr.c
drivers/net/wireless/mwifiex/cfg80211.c
drivers/net/wireless/mwifiex/cmdevt.c
drivers/net/wireless/mwifiex/main.c
drivers/net/wireless/mwifiex/sta_tx.c
drivers/net/wireless/mwifiex/tdls.c
drivers/net/wireless/mwifiex/txrx.c
drivers/net/wireless/mwifiex/uap_txrx.c
drivers/net/wireless/rt2x00/rt2800usb.c
drivers/net/xen-netback/netback.c
drivers/net/xen-netfront.c
drivers/of/of_mdio.c
drivers/parport/Kconfig
drivers/pci/pci-acpi.c
drivers/pinctrl/pinctrl-st.c
drivers/pnp/pnpacpi/core.c
drivers/s390/char/raw3270.c
drivers/s390/crypto/ap_bus.c
drivers/staging/media/omap4iss/Kconfig
drivers/usb/chipidea/udc.c
drivers/usb/core/hub.c
drivers/xen/balloon.c
drivers/xen/manage.c
fs/aio.c
fs/btrfs/ordered-data.c
fs/btrfs/volumes.c
fs/coredump.c
fs/direct-io.c
fs/fuse/dev.c
fs/fuse/dir.c
fs/fuse/file.c
fs/fuse/inode.c
fs/gfs2/file.c
fs/gfs2/glock.c
fs/gfs2/glops.c
fs/gfs2/lock_dlm.c
fs/gfs2/rgrp.c
fs/namei.c
fs/nfs/direct.c
fs/nfs/internal.h
fs/nfs/nfs3acl.c
fs/nfs/nfs3proc.c
fs/nfs/pagelist.c
fs/nfs/write.c
fs/nfsd/nfs4xdr.c
fs/quota/dquot.c
fs/xattr.c
fs/xfs/xfs_bmap.c
fs/xfs/xfs_bmap.h
fs/xfs/xfs_bmap_util.c
fs/xfs/xfs_bmap_util.h
fs/xfs/xfs_btree.c
fs/xfs/xfs_iomap.c
fs/xfs/xfs_sb.c
include/acpi/acpi_bus.h
include/acpi/acpiosxf.h
include/acpi/acpixf.h
include/acpi/actbl1.h
include/acpi/actbl2.h
include/acpi/actypes.h
include/acpi/ghes.h
include/acpi/platform/acenv.h
include/acpi/platform/aclinux.h
include/acpi/platform/aclinuxex.h
include/linux/acpi.h
include/linux/cpufreq.h
include/linux/libata.h
include/linux/mlx4/device.h
include/linux/mutex.h
include/linux/of_mdio.h
include/linux/osq_lock.h [new file with mode: 0644]
include/linux/pagemap.h
include/linux/pci-acpi.h
include/linux/pm_runtime.h
include/linux/rcupdate.h
include/linux/rwsem-spinlock.h
include/linux/rwsem.h
include/linux/sched.h
include/linux/sfi_acpi.h
include/net/neighbour.h
include/net/netfilter/nf_tables.h
include/net/netns/ieee802154_6lowpan.h
include/net/netns/nftables.h
include/net/sock.h
include/uapi/linux/fuse.h
kernel/Kconfig.locks
kernel/events/core.c
kernel/kprobes.c
kernel/locking/mcs_spinlock.c
kernel/locking/mcs_spinlock.h
kernel/locking/mutex.c
kernel/locking/rwsem-spinlock.c
kernel/locking/rwsem-xadd.c
kernel/locking/rwsem.c
kernel/power/main.c
kernel/power/process.c
kernel/power/suspend.c
kernel/rcu/tree.c
kernel/rcu/tree.h
kernel/rcu/tree_plugin.h
kernel/rcu/update.c
kernel/sched/core.c
kernel/sched/debug.c
kernel/time/alarmtimer.c
kernel/trace/ftrace.c
kernel/trace/ring_buffer.c
kernel/trace/trace.c
kernel/trace/trace_clock.c
kernel/trace/trace_events.c
lib/cpumask.c
mm/hugetlb.c
mm/memory-failure.c
mm/memory.c
mm/migrate.c
mm/rmap.c
mm/shmem.c
mm/slab_common.c
mm/truncate.c
net/8021q/vlan_dev.c
net/appletalk/ddp.c
net/batman-adv/bridge_loop_avoidance.c
net/batman-adv/soft-interface.c
net/batman-adv/translation-table.c
net/batman-adv/types.h
net/bluetooth/hci_conn.c
net/bluetooth/smp.c
net/core/dev.c
net/core/neighbour.c
net/dns_resolver/dns_query.c
net/ipv4/af_inet.c
net/ipv4/gre_demux.c
net/ipv4/gre_offload.c
net/ipv4/icmp.c
net/ipv4/igmp.c
net/ipv4/ip_options.c
net/ipv4/ip_tunnel.c
net/ipv4/route.c
net/ipv4/tcp.c
net/ipv4/tcp_input.c
net/ipv4/tcp_offload.c
net/ipv4/tcp_output.c
net/ipv4/udp.c
net/ipv6/mcast.c
net/ipv6/tcpv6_offload.c
net/ipv6/udp.c
net/l2tp/l2tp_ppp.c
net/mac80211/util.c
net/netfilter/nf_tables_api.c
net/netfilter/nf_tables_core.c
net/netlink/af_netlink.c
net/openvswitch/actions.c
net/openvswitch/datapath.c
net/openvswitch/flow.c
net/openvswitch/flow.h
net/openvswitch/flow_table.c
net/openvswitch/flow_table.h
net/openvswitch/vport-gre.c
net/sched/cls_u32.c
net/sctp/ulpevent.c
net/tipc/bcast.c
net/tipc/msg.c
net/wireless/core.h
net/wireless/nl80211.c
net/wireless/reg.c
sound/firewire/bebob/bebob_maudio.c
sound/pci/hda/hda_controller.c
sound/pci/hda/hda_intel.c
sound/pci/hda/hda_priv.h
sound/pci/hda/hda_tegra.c
sound/pci/hda/patch_hdmi.c
tools/lib/lockdep/include/liblockdep/mutex.h
tools/lib/lockdep/include/liblockdep/rwlock.h
tools/lib/lockdep/preload.c
tools/perf/ui/browsers/hists.c
tools/perf/util/machine.c
tools/power/acpi/Makefile
tools/power/acpi/common/cmfsize.c
tools/power/acpi/common/getopt.c
tools/power/acpi/os_specific/service_layers/oslibcfs.c [new file with mode: 0644]
tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
tools/power/acpi/os_specific/service_layers/osunixxf.c [new file with mode: 0644]
tools/power/acpi/tools/acpidump/acpidump.h
tools/power/acpi/tools/acpidump/apdump.c
tools/power/acpi/tools/acpidump/apfiles.c
tools/power/acpi/tools/acpidump/apmain.c

index fd786ea13a1ffddf7477b191193cdc89e5eadc3e..e182be5e3c83cb553341f7969678370e649fcb2d 100644 (file)
@@ -60,12 +60,6 @@ If the driver needs to perform more complex initialization like getting and
 configuring GPIOs it can get its ACPI handle and extract this information
 from ACPI tables.
 
-Currently the kernel is not able to automatically determine from which ACPI
-device it should make the corresponding platform device so we need to add
-the ACPI device explicitly to acpi_platform_device_ids list defined in
-drivers/acpi/acpi_platform.c. This limitation is only for the platform
-devices, SPI and I2C devices are created automatically as described below.
-
 DMA support
 ~~~~~~~~~~~
 DMA controllers enumerated via ACPI should be registered in the system to
index f055515d2b62472c2507f2ebd2b69091dfd9b89d..366690cb86a3065768b74dbf8136a9c560f94155 100644 (file)
@@ -8,10 +8,12 @@ Both required and optional properties listed below must be defined
 under node /cpus/cpu@0.
 
 Required properties:
-- operating-points: Refer to Documentation/devicetree/bindings/power/opp.txt
-  for details
+- None
 
 Optional properties:
+- operating-points: Refer to Documentation/devicetree/bindings/power/opp.txt for
+  details. OPPs *must* be supplied either via DT, i.e. this property, or
+  populated at runtime.
 - clock-latency: Specify the possible maximum transition latency for clock,
   in unit of nanoseconds.
 - voltage-tolerance: Specify the CPU voltage tolerance in percentage.
index f1ea2c69648dcad46d061a78ac33036d3ac5c727..c587a966413e8597da3241f851db92f6e6df1d81 100644 (file)
@@ -281,6 +281,19 @@ gestures can normally be extracted from it.
 If INPUT_PROP_SEMI_MT is not set, the device is assumed to be a true MT
 device.
 
+INPUT_PROP_TOPBUTTONPAD:
+-----------------------
+Some laptops, most notably the Lenovo *40 series provide a trackstick
+device but do not have physical buttons associated with the trackstick
+device. Instead, the top area of the touchpad is marked to show
+visual/haptic areas for left, middle, right buttons intended to be used
+with the trackstick.
+
+If INPUT_PROP_TOPBUTTONPAD is set, userspace should emulate buttons
+accordingly. This property does not affect kernel behavior.
+The kernel does not provide button emulation for such devices but treats
+them as any other INPUT_PROP_BUTTONPAD device.
+
 Guidelines:
 ==========
 The guidelines below ensure proper single-touch and multi-finger functionality.
index c1b9aa8c5a52e807e6458d40d96d4f1bc91f107b..b7fa2f599459b67cb27586bea9869d83a033986f 100644 (file)
@@ -2790,6 +2790,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        leaf rcu_node structure.  Useful for very large
                        systems.
 
+       rcutree.jiffies_till_sched_qs= [KNL]
+                       Set required age in jiffies for a
+                       given grace period before RCU starts
+                       soliciting quiescent-state help from
+                       rcu_note_context_switch().
+
        rcutree.jiffies_till_first_fqs= [KNL]
                        Set delay from grace-period initialization to
                        first attempt to force quiescent states.
@@ -3526,7 +3532,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        the allocated input device; If set to 0, video driver
                        will only send out the event without touching backlight
                        brightness level.
-                       default: 0
+                       default: 1
 
        virtio_mmio.device=
                        [VMMIO] Memory mapped virtio (platform) device.
index e31c87474739329ec0b2458ad26a40f7251f3874..86efa7e213c257e59f4285d30bad7555b259c566 100644 (file)
@@ -156,7 +156,6 @@ F:  drivers/net/hamradio/6pack.c
 
 8169 10/100/1000 GIGABIT ETHERNET DRIVER
 M:     Realtek linux nic maintainers <nic_swsd@realtek.com>
-M:     Francois Romieu <romieu@fr.zoreil.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     drivers/net/ethernet/realtek/r8169.c
@@ -4511,8 +4510,7 @@ S:        Supported
 F:     drivers/idle/i7300_idle.c
 
 IEEE 802.15.4 SUBSYSTEM
-M:     Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
-M:     Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+M:     Alexander Aring <alex.aring@gmail.com>
 L:     linux-zigbee-devel@lists.sourceforge.net (moderated for non-subscribers)
 W:     http://apps.sourceforge.net/trac/linux-zigbee
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/lowpan/lowpan.git
@@ -6958,6 +6956,12 @@ L:       linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     drivers/pinctrl/pinctrl-at91.c
 
+PIN CONTROLLER - RENESAS
+M:     Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+L:     linux-sh@vger.kernel.org
+S:     Maintained
+F:     drivers/pinctrl/sh-pfc/
+
 PIN CONTROLLER - SAMSUNG
 M:     Tomasz Figa <t.figa@samsung.com>
 M:     Thomas Abraham <thomas.abraham@linaro.org>
@@ -8021,6 +8025,16 @@ F:       drivers/ata/
 F:     include/linux/ata.h
 F:     include/linux/libata.h
 
+SERIAL ATA AHCI PLATFORM devices support
+M:     Hans de Goede <hdegoede@redhat.com>
+M:     Tejun Heo <tj@kernel.org>
+L:     linux-ide@vger.kernel.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+S:     Supported
+F:     drivers/ata/ahci_platform.c
+F:     drivers/ata/libahci_platform.c
+F:     include/linux/ahci_platform.h
+
 SERVER ENGINES 10Gbps iSCSI - BladeEngine 2 DRIVER
 M:     Jayamohan Kallickal <jayamohan.kallickal@emulex.com>
 L:     linux-scsi@vger.kernel.org
index f3c543df4697f8e3cb39b31520d6ce61aa3bee84..f6a7794e4db436c16246389642f86de3c9098699 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 16
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc7
 NAME = Shuffling Zombie Juror
 
 # *DOCUMENTATION*
@@ -688,6 +688,8 @@ KBUILD_CFLAGS       += -fomit-frame-pointer
 endif
 endif
 
+KBUILD_CFLAGS   += $(call cc-option, -fno-var-tracking-assignments)
+
 ifdef CONFIG_DEBUG_INFO
 KBUILD_CFLAGS  += -g
 KBUILD_AFLAGS  += -Wa,-gdwarf-2
index 245058b3b0ef7d5d27b7c113d6199127d8cd5a8c..88acf8bc1490a6cc150029f82e8965db80970257 100644 (file)
@@ -6,6 +6,7 @@ config ARM
        select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
        select ARCH_HAVE_CUSTOM_GPIO_H
        select ARCH_MIGHT_HAVE_PC_PARPORT
+       select ARCH_SUPPORTS_ATOMIC_RMW
        select ARCH_USE_BUILTIN_BSWAP
        select ARCH_USE_CMPXCHG_LOCKREF
        select ARCH_WANT_IPC_PARSE_VERSION
index 287795985e32f1590090219599134f0447543cb5..b84bac5bada400369450033f419e271224b96e19 100644 (file)
                        compatible = "atmel,at91rm9200-ohci", "usb-ohci";
                        reg = <0x00500000 0x00100000>;
                        interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>;
-                       clocks = <&usb>, <&uhphs_clk>, <&udphs_clk>,
+                       clocks = <&usb>, <&uhphs_clk>, <&uhphs_clk>,
                                 <&uhpck>;
                        clock-names = "usb_clk", "ohci_clk", "hclk", "uhpck";
                        status = "disabled";
index 2ebc42140ea6bd527b57181e51c59eaf9c3c2ae1..2c0d6ea3ab412325252d4515d8c5b6af3e23425f 100644 (file)
                                compatible = "atmel,at91sam9rl-pwm";
                                reg = <0xf8034000 0x300>;
                                interrupts = <18 IRQ_TYPE_LEVEL_HIGH 4>;
+                               clocks = <&pwm_clk>;
                                #pwm-cells = <3>;
                                status = "disabled";
                        };
                        compatible = "atmel,at91rm9200-ohci", "usb-ohci";
                        reg = <0x00600000 0x100000>;
                        interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>;
-                       clocks = <&usb>, <&uhphs_clk>, <&udphs_clk>,
-                                <&uhpck>;
+                       clocks = <&usb>, <&uhphs_clk>, <&uhphs_clk>, <&uhpck>;
                        clock-names = "usb_clk", "ohci_clk", "hclk", "uhpck";
                        status = "disabled";
                };
index 9d853189028bb0c79ad72557b018c12d5416aa67..e35d880f9773e675805ad27c830d4124839c7bfc 100644 (file)
@@ -275,7 +275,7 @@ void store_cpu_topology(unsigned int cpuid)
                cpu_topology[cpuid].socket_id, mpidr);
 }
 
-static inline const int cpu_corepower_flags(void)
+static inline int cpu_corepower_flags(void)
 {
        return SD_SHARE_PKG_RESOURCES  | SD_SHARE_POWERDOMAIN;
 }
index 8a134d019cb3af0ab7d792ae3ca177dd039df95d..920a4baa53cd7f4eb290e75d1f0e5c62e4ca8cdd 100644 (file)
@@ -40,15 +40,17 @@ static inline void cpu_leave_lowpower(void)
 
 static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
 {
+       u32 mpidr = cpu_logical_map(cpu);
+       u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+
        for (;;) {
 
-               /* make cpu1 to be turned off at next WFI command */
-               if (cpu == 1)
-                       exynos_cpu_power_down(cpu);
+               /* Turn the CPU off on next WFI instruction. */
+               exynos_cpu_power_down(core_id);
 
                wfi();
 
-               if (pen_release == cpu_logical_map(cpu)) {
+               if (pen_release == core_id) {
                        /*
                         * OK, proper wakeup, we're done
                         */
index 1c8d31e39520005f697974acb4b6cb021384a9b2..50b9aad5e27b729acc2aa060be4d604d866ef07b 100644 (file)
@@ -90,7 +90,8 @@ static void exynos_secondary_init(unsigned int cpu)
 static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
        unsigned long timeout;
-       unsigned long phys_cpu = cpu_logical_map(cpu);
+       u32 mpidr = cpu_logical_map(cpu);
+       u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        int ret = -ENOSYS;
 
        /*
@@ -104,17 +105,18 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
         * the holding pen - release it, then wait for it to flag
         * that it has been released by resetting pen_release.
         *
-        * Note that "pen_release" is the hardware CPU ID, whereas
+        * Note that "pen_release" is the hardware CPU core ID, whereas
         * "cpu" is Linux's internal ID.
         */
-       write_pen_release(phys_cpu);
+       write_pen_release(core_id);
 
-       if (!exynos_cpu_power_state(cpu)) {
-               exynos_cpu_power_up(cpu);
+       if (!exynos_cpu_power_state(core_id)) {
+               exynos_cpu_power_up(core_id);
                timeout = 10;
 
                /* wait max 10 ms until cpu1 is on */
-               while (exynos_cpu_power_state(cpu) != S5P_CORE_LOCAL_PWR_EN) {
+               while (exynos_cpu_power_state(core_id)
+                      != S5P_CORE_LOCAL_PWR_EN) {
                        if (timeout-- == 0)
                                break;
 
@@ -145,20 +147,20 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
                 * Try to set boot address using firmware first
                 * and fall back to boot register if it fails.
                 */
-               ret = call_firmware_op(set_cpu_boot_addr, phys_cpu, boot_addr);
+               ret = call_firmware_op(set_cpu_boot_addr, core_id, boot_addr);
                if (ret && ret != -ENOSYS)
                        goto fail;
                if (ret == -ENOSYS) {
-                       void __iomem *boot_reg = cpu_boot_reg(phys_cpu);
+                       void __iomem *boot_reg = cpu_boot_reg(core_id);
 
                        if (IS_ERR(boot_reg)) {
                                ret = PTR_ERR(boot_reg);
                                goto fail;
                        }
-                       __raw_writel(boot_addr, cpu_boot_reg(phys_cpu));
+                       __raw_writel(boot_addr, cpu_boot_reg(core_id));
                }
 
-               call_firmware_op(cpu_boot, phys_cpu);
+               call_firmware_op(cpu_boot, core_id);
 
                arch_send_wakeup_ipi_mask(cpumask_of(cpu));
 
@@ -227,22 +229,24 @@ static void __init exynos_smp_prepare_cpus(unsigned int max_cpus)
         * boot register if it fails.
         */
        for (i = 1; i < max_cpus; ++i) {
-               unsigned long phys_cpu;
                unsigned long boot_addr;
+               u32 mpidr;
+               u32 core_id;
                int ret;
 
-               phys_cpu = cpu_logical_map(i);
+               mpidr = cpu_logical_map(i);
+               core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
                boot_addr = virt_to_phys(exynos4_secondary_startup);
 
-               ret = call_firmware_op(set_cpu_boot_addr, phys_cpu, boot_addr);
+               ret = call_firmware_op(set_cpu_boot_addr, core_id, boot_addr);
                if (ret && ret != -ENOSYS)
                        break;
                if (ret == -ENOSYS) {
-                       void __iomem *boot_reg = cpu_boot_reg(phys_cpu);
+                       void __iomem *boot_reg = cpu_boot_reg(core_id);
 
                        if (IS_ERR(boot_reg))
                                break;
-                       __raw_writel(boot_addr, cpu_boot_reg(phys_cpu));
+                       __raw_writel(boot_addr, cpu_boot_reg(core_id));
                }
        }
 }
index 8e795dea02ece013f4bb0dc9caa1bda63bc5ffc1..8556c787e59ca89de93f370bd90745b953dbeff9 100644 (file)
@@ -70,7 +70,7 @@ static const char *cko_sels[] = { "cko1", "cko2", };
 static const char *lvds_sels[] = {
        "dummy", "dummy", "dummy", "dummy", "dummy", "dummy",
        "pll4_audio", "pll5_video", "pll8_mlb", "enet_ref",
-       "pcie_ref", "sata_ref",
+       "pcie_ref_125m", "sata_ref_100m",
 };
 
 enum mx6q_clks {
@@ -491,7 +491,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
 
        /* All existing boards with PCIe use LVDS1 */
        if (IS_ENABLED(CONFIG_PCI_IMX6))
-               clk_set_parent(clk[lvds1_sel], clk[sata_ref]);
+               clk_set_parent(clk[lvds1_sel], clk[sata_ref_100m]);
 
        /* Set initial power mode */
        imx6q_set_lpm(WAIT_CLOCKED);
index 477202fd39cc0572e0c678b33084efb7de0b69b1..2bdc3233abe2bcc78c527bf8efe4b0032a5880dc 100644 (file)
@@ -292,6 +292,10 @@ static struct notifier_block mvebu_hwcc_nb = {
        .notifier_call = mvebu_hwcc_notifier,
 };
 
+static struct notifier_block mvebu_hwcc_pci_nb = {
+       .notifier_call = mvebu_hwcc_notifier,
+};
+
 static void __init armada_370_coherency_init(struct device_node *np)
 {
        struct resource res;
@@ -427,7 +431,7 @@ static int __init coherency_pci_init(void)
 {
        if (coherency_available())
                bus_register_notifier(&pci_bus_type,
-                                      &mvebu_hwcc_nb);
+                                      &mvebu_hwcc_pci_nb);
        return 0;
 }
 
index 5925366bc03cccd8e7d576ca8ca63f3c0585bb9b..da5bb292b91cf3fe55b5217e5ae8db626b10072f 100644 (file)
@@ -15,6 +15,8 @@
 #include <linux/linkage.h>
 #include <linux/init.h>
 
+#include <asm/assembler.h>
+
        __CPUINIT
 #define CPU_RESUME_ADDR_REG 0xf10182d4
 
 .global armada_375_smp_cpu1_enable_code_end
 
 armada_375_smp_cpu1_enable_code_start:
-       ldr     r0, [pc, #4]
+ARM_BE8(setend be)
+       adr     r0, 1f
+       ldr     r0, [r0]
        ldr     r1, [r0]
+ARM_BE8(rev    r1, r1)
        mov     pc, r1
+1:
        .word   CPU_RESUME_ADDR_REG
 armada_375_smp_cpu1_enable_code_end:
 
 ENTRY(mvebu_cortex_a9_secondary_startup)
+ARM_BE8(setend be)
        bl      v7_invalidate_l1
        b       secondary_startup
 ENDPROC(mvebu_cortex_a9_secondary_startup)
index a1d407c0febe9f673dab48ece9099d699eb64d76..25aa8237d66844ca5523ca6b81f1c48e8e2a7cb1 100644 (file)
@@ -201,12 +201,12 @@ static noinline int do_armada_370_xp_cpu_suspend(unsigned long deepidle)
 
        /* Test the CR_C bit and set it if it was cleared */
        asm volatile(
-       "mrc    p15, 0, %0, c1, c0, 0 \n\t"
-       "tst    %0, #(1 << 2) \n\t"
-       "orreq  %0, %0, #(1 << 2) \n\t"
-       "mcreq  p15, 0, %0, c1, c0, 0 \n\t"
+       "mrc    p15, 0, r0, c1, c0, 0 \n\t"
+       "tst    r0, #(1 << 2) \n\t"
+       "orreq  r0, r0, #(1 << 2) \n\t"
+       "mcreq  p15, 0, r0, c1, c0, 0 \n\t"
        "isb    "
-       : : "r" (0));
+       : : : "r0");
 
        pr_warn("Failed to suspend the system\n");
 
index a474de346be665270f7e50278667dfc4bc16cc72..839f48c26ef0291019126df3213afcc656e1d0c4 100644 (file)
@@ -4,6 +4,7 @@ config ARM64
        select ARCH_HAS_OPP
        select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
        select ARCH_USE_CMPXCHG_LOCKREF
+       select ARCH_SUPPORTS_ATOMIC_RMW
        select ARCH_WANT_OPTIONAL_GPIOLIB
        select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
        select ARCH_WANT_FRAME_POINTERS
index 60e98a639ac55306a1de8b5b95ed4633fba89678..e786e6cdc400df422984bb0c17f93aacc6cb42ed 100644 (file)
@@ -12,8 +12,6 @@
 #include <linux/efi.h>
 #include <linux/libfdt.h>
 #include <asm/sections.h>
-#include <generated/compile.h>
-#include <generated/utsrelease.h>
 
 /*
  * AArch64 requires the DTB to be 8-byte aligned in the first 512MiB from
index f43db8a6926208f9b9419c3114d1c41c13514566..e90c5426fe14e5a1212706e3802aa32325c77d95 100644 (file)
@@ -60,6 +60,17 @@ static int __init early_initrd(char *p)
 early_param("initrd", early_initrd);
 #endif
 
+/*
+ * Return the maximum physical address for ZONE_DMA (DMA_BIT_MASK(32)). It
+ * currently assumes that for memory starting above 4G, 32-bit devices will
+ * use a DMA offset.
+ */
+static phys_addr_t max_zone_dma_phys(void)
+{
+       phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, 32);
+       return min(offset + (1ULL << 32), memblock_end_of_DRAM());
+}
+
 static void __init zone_sizes_init(unsigned long min, unsigned long max)
 {
        struct memblock_region *reg;
@@ -70,9 +81,7 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
 
        /* 4GB maximum for 32-bit only capable devices */
        if (IS_ENABLED(CONFIG_ZONE_DMA)) {
-               unsigned long max_dma_phys =
-                       (unsigned long)(dma_to_phys(NULL, DMA_BIT_MASK(32)) + 1);
-               max_dma = max(min, min(max, max_dma_phys >> PAGE_SHIFT));
+               max_dma = PFN_DOWN(max_zone_dma_phys());
                zone_size[ZONE_DMA] = max_dma - min;
        }
        zone_size[ZONE_NORMAL] = max - max_dma;
@@ -146,7 +155,7 @@ void __init arm64_memblock_init(void)
 
        /* 4GB maximum for 32-bit only capable devices */
        if (IS_ENABLED(CONFIG_ZONE_DMA))
-               dma_phys_limit = dma_to_phys(NULL, DMA_BIT_MASK(32)) + 1;
+               dma_phys_limit = max_zone_dma_phys();
        dma_contiguous_reserve(dma_phys_limit);
 
        memblock_allow_resize();
index a7e9bfd84183d5b7c26924679f9bb33d88ac0118..fcec5ce71392d20712b15516c3786e43513b0d10 100644 (file)
@@ -102,7 +102,7 @@ CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_BLACKFIN_TWI=y
 CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
 CONFIG_SPI=y
-CONFIG_SPI_BFIN_V3=y
+CONFIG_SPI_ADI_V3=y
 CONFIG_GPIOLIB=y
 CONFIG_GPIO_SYSFS=y
 # CONFIG_HWMON is not set
index ba35864b2b74b50e3ad32527419a83ef6abb9629..c9eec84aa258628d1c9d70fc545420dde30c8bc1 100644 (file)
@@ -145,7 +145,7 @@ SECTIONS
 
        .text_l1 L1_CODE_START : AT(LOADADDR(.exit.data) + SIZEOF(.exit.data))
 #else
-       .init.data : AT(__data_lma + __data_len)
+       .init.data : AT(__data_lma + __data_len + 32)
        {
                __sinitdata = .;
                INIT_DATA
index 63b0e4fe760cd558d20a2f8359c1c975180b9c11..0ccf0cf4daaf92f8d38aa18962defd8a10707e7d 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/spi/spi.h>
 #include <linux/spi/flash.h>
 #include <linux/irq.h>
+#include <linux/gpio.h>
 #include <linux/i2c.h>
 #include <asm/dma.h>
 #include <asm/bfin5xx_spi.h>
index c65c6dbda3da9af86231d299b4502f0d3d72edb9..1e7290ef35258da56c5521ed23a1b637286b7342 100644 (file)
@@ -21,6 +21,7 @@
 #endif
 #include <linux/ata_platform.h>
 #include <linux/irq.h>
+#include <linux/gpio.h>
 #include <asm/dma.h>
 #include <asm/bfin5xx_spi.h>
 #include <asm/portmux.h>
index af58454b4bff3fdc4bffda53d5d252726fd4e4f8..c7495dc74690db99f52bfa30c94c62dd5faae63c 100644 (file)
@@ -21,6 +21,7 @@
 #endif
 #include <linux/ata_platform.h>
 #include <linux/irq.h>
+#include <linux/gpio.h>
 #include <asm/dma.h>
 #include <asm/bfin5xx_spi.h>
 #include <asm/portmux.h>
index a0211225748d23f0d0f14cdba4fd99a34aa8208f..6b988ad653d8d1e69a1721ddfe4b7208bf7cebb9 100644 (file)
@@ -21,6 +21,7 @@
 #endif
 #include <linux/ata_platform.h>
 #include <linux/irq.h>
+#include <linux/gpio.h>
 #include <asm/dma.h>
 #include <asm/bfin5xx_spi.h>
 #include <asm/portmux.h>
index 90138e6112c14b2f186640f22e61368a517fab94..1fe7ff286619f693c113faeac592ce3167d8de72 100644 (file)
@@ -2118,7 +2118,7 @@ static struct pinctrl_map __initdata bfin_pinmux_map[] = {
        PIN_MAP_MUX_GROUP_DEFAULT("bfin-rotary",  "pinctrl-adi2.0", NULL, "rotary"),
        PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.0",  "pinctrl-adi2.0", NULL, "can0"),
        PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.1",  "pinctrl-adi2.0", NULL, "can1"),
-       PIN_MAP_MUX_GROUP_DEFAULT("bf54x-lq043",  "pinctrl-adi2.0", NULL, "ppi0_24b"),
+       PIN_MAP_MUX_GROUP_DEFAULT("bf54x-lq043",  "pinctrl-adi2.0", "ppi0_24bgrp", "ppi0"),
        PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.0",  "pinctrl-adi2.0", NULL, "sport0"),
        PIN_MAP_MUX_GROUP_DEFAULT("bfin-tdm.0",  "pinctrl-adi2.0", NULL, "sport0"),
        PIN_MAP_MUX_GROUP_DEFAULT("bfin-ac97.0",  "pinctrl-adi2.0", NULL, "sport0"),
@@ -2140,7 +2140,9 @@ static struct pinctrl_map __initdata bfin_pinmux_map[] = {
        PIN_MAP_MUX_GROUP_DEFAULT("pata-bf54x",  "pinctrl-adi2.0", NULL, "atapi_alter"),
 #endif
        PIN_MAP_MUX_GROUP_DEFAULT("bf5xx-nand.0",  "pinctrl-adi2.0", NULL, "nfc0"),
-       PIN_MAP_MUX_GROUP_DEFAULT("bf54x-keys",  "pinctrl-adi2.0", NULL, "keys_4x4"),
+       PIN_MAP_MUX_GROUP_DEFAULT("bf54x-keys",  "pinctrl-adi2.0", "keys_4x4grp", "keys"),
+       PIN_MAP_MUX_GROUP("bf54x-keys", "4bit",  "pinctrl-adi2.0", "keys_4x4grp", "keys"),
+       PIN_MAP_MUX_GROUP("bf54x-keys", "8bit",  "pinctrl-adi2.0", "keys_8x8grp", "keys"),
 };
 
 static int __init ezkit_init(void)
index 430b16d5ccb1124f6fd896d4c8651050695d9053..6ab951534d790b6060aeea63cda8c1b158d90470 100644 (file)
@@ -44,6 +44,7 @@
 #include <linux/spi/flash.h>
 #include <linux/irq.h>
 #include <linux/interrupt.h>
+#include <linux/gpio.h>
 #include <linux/jiffies.h>
 #include <linux/i2c-pca-platform.h>
 #include <linux/delay.h>
index 9f777df4cacce9e77ada5ec46ff1698a72fbe419..e862f7823e68db1b5ea374a295982c23db2800c8 100644 (file)
@@ -18,6 +18,7 @@
 #endif
 #include <linux/ata_platform.h>
 #include <linux/irq.h>
+#include <linux/gpio.h>
 #include <asm/dma.h>
 #include <asm/bfin5xx_spi.h>
 #include <asm/portmux.h>
index 88dee43e7abe8c4e34da9016b2333c539ef6b9a2..2de71e8c104b1e2233c7eba47c42ad28d2ab60c9 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/spi/spi.h>
 #include <linux/irq.h>
 #include <linux/interrupt.h>
+#include <linux/gpio.h>
 #include <linux/delay.h>
 #include <asm/dma.h>
 #include <asm/bfin5xx_spi.h>
index 1ba4600de69f72b37ca503f7aab752b9f4dcdb97..e2c0b024ce88f2593f4551b711b3821d8ae3879e 100644 (file)
@@ -698,8 +698,6 @@ int bf609_nor_flash_init(struct platform_device *pdev)
 {
 #define CONFIG_SMC_GCTL_VAL     0x00000010
 
-       if (!devm_pinctrl_get_select_default(&pdev->dev))
-               return -EBUSY;
        bfin_write32(SMC_GCTL, CONFIG_SMC_GCTL_VAL);
        bfin_write32(SMC_B0CTL, 0x01002011);
        bfin_write32(SMC_B0TIM, 0x08170977);
@@ -709,7 +707,6 @@ int bf609_nor_flash_init(struct platform_device *pdev)
 
 void bf609_nor_flash_exit(struct platform_device *pdev)
 {
-       devm_pinctrl_put(pdev->dev.pins->p);
        bfin_write32(SMC_GCTL, 0);
 }
 
@@ -2058,15 +2055,14 @@ static struct pinctrl_map __initdata bfin_pinmux_map[] = {
        PIN_MAP_MUX_GROUP_DEFAULT("bfin-rotary",  "pinctrl-adi2.0", NULL, "rotary"),
        PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.0",  "pinctrl-adi2.0", NULL, "can0"),
        PIN_MAP_MUX_GROUP_DEFAULT("physmap-flash.0",  "pinctrl-adi2.0", NULL, "smc0"),
-       PIN_MAP_MUX_GROUP_DEFAULT("bf609_nl8048.2",  "pinctrl-adi2.0", NULL, "ppi2_16b"),
-       PIN_MAP_MUX_GROUP_DEFAULT("bfin_display.0",  "pinctrl-adi2.0", NULL, "ppi0_16b"),
-#if IS_ENABLED(CONFIG_VIDEO_MT9M114)
-       PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0",  "pinctrl-adi2.0", NULL, "ppi0_8b"),
-#elif IS_ENABLED(CONFIG_VIDEO_VS6624)
-       PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0",  "pinctrl-adi2.0", NULL, "ppi0_16b"),
-#else
-       PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0",  "pinctrl-adi2.0", NULL, "ppi0_24b"),
-#endif
+       PIN_MAP_MUX_GROUP_DEFAULT("bf609_nl8048.2",  "pinctrl-adi2.0", "ppi2_16bgrp", "ppi2"),
+       PIN_MAP_MUX_GROUP("bfin_display.0", "8bit",  "pinctrl-adi2.0", "ppi2_8bgrp", "ppi2"),
+       PIN_MAP_MUX_GROUP_DEFAULT("bfin_display.0",  "pinctrl-adi2.0", "ppi2_16bgrp", "ppi2"),
+       PIN_MAP_MUX_GROUP("bfin_display.0", "16bit",  "pinctrl-adi2.0", "ppi2_16bgrp", "ppi2"),
+       PIN_MAP_MUX_GROUP("bfin_capture.0", "8bit",  "pinctrl-adi2.0", "ppi0_8bgrp", "ppi0"),
+       PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0",  "pinctrl-adi2.0", "ppi0_16bgrp", "ppi0"),
+       PIN_MAP_MUX_GROUP("bfin_capture.0", "16bit",  "pinctrl-adi2.0", "ppi0_16bgrp", "ppi0"),
+       PIN_MAP_MUX_GROUP("bfin_capture.0", "24bit",  "pinctrl-adi2.0", "ppi0_24bgrp", "ppi0"),
        PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.0",  "pinctrl-adi2.0", NULL, "sport0"),
        PIN_MAP_MUX_GROUP_DEFAULT("bfin-tdm.0",  "pinctrl-adi2.0", NULL, "sport0"),
        PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.1",  "pinctrl-adi2.0", NULL, "sport1"),
index 3ca0fb965636ed3fad126ae021d9d87a6518ea8a..a1efd936dd30d2ec18e08c9d5b76bae6afabe99b 100644 (file)
@@ -10,6 +10,7 @@
 #define __MACH_BF609_PM_H__
 
 #include <linux/suspend.h>
+#include <linux/platform_device.h>
 
 extern int bfin609_pm_enter(suspend_state_t state);
 extern int bf609_pm_prepare(void);
@@ -19,6 +20,6 @@ void bf609_hibernate(void);
 void bfin_sec_raise_irq(unsigned int sid);
 void coreb_enable(void);
 
-int bf609_nor_flash_init(void);
-void bf609_nor_flash_exit(void);
+int bf609_nor_flash_init(struct platform_device *pdev);
+void bf609_nor_flash_exit(struct platform_device *pdev);
 #endif
index 0cdd6955c7be5a80a2cb7d406b3fefb1eac9a9a9..b1bfcf434d16cfc25e999222c5c972f3f110d519 100644 (file)
@@ -291,13 +291,13 @@ static struct bfin_cpu_pm_fns bf609_cpu_pm = {
 #if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
 static int smc_pm_syscore_suspend(void)
 {
-       bf609_nor_flash_exit();
+       bf609_nor_flash_exit(NULL);
        return 0;
 }
 
 static void smc_pm_syscore_resume(void)
 {
-       bf609_nor_flash_init();
+       bf609_nor_flash_init(NULL);
 }
 
 static struct syscore_ops smc_pm_syscore_ops = {
index 867b7cef204cb91f0ed15e9386ba7ea0b2089d96..1f94784eab6d79b7f63673e20e2039fb41a07edb 100644 (file)
@@ -1208,8 +1208,6 @@ int __init init_arch_irq(void)
 
        bfin_sec_set_priority(CONFIG_SEC_IRQ_PRIORITY_LEVELS, sec_int_priority);
 
-       bfin_sec_set_priority(CONFIG_SEC_IRQ_PRIORITY_LEVELS, sec_int_priority);
-
        /* Enable interrupts IVG7-15 */
        bfin_irq_flags |= IMASK_IVG15 |
            IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
index 2f3abcf8f6bc9b2b206652b52cf1318f93acb74a..44a6915ab13d474d53457e0d8c1a62838974b7ac 100644 (file)
@@ -10,6 +10,7 @@ config IA64
        select ARCH_MIGHT_HAVE_PC_SERIO
        select PCI if (!IA64_HP_SIM)
        select ACPI if (!IA64_HP_SIM)
+       select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI
        select PM if (!IA64_HP_SIM)
        select HAVE_UNSTABLE_SCHED_CLOCK
        select HAVE_IDE
index 3f9eaeec98734852e98830969ccf826d399dabde..35ff13afbf345df260ecc30045f91ca1a77fce42 100644 (file)
@@ -19,8 +19,6 @@
 
 /* Asm macros */
 
-#ifdef CONFIG_ACPI
-
 static inline int
 ia64_acpi_acquire_global_lock(unsigned int *lock)
 {
@@ -51,6 +49,4 @@ ia64_acpi_release_global_lock(unsigned int *lock)
 #define ACPI_RELEASE_GLOBAL_LOCK(facs, Acq)                            \
        ((Acq) = ia64_acpi_release_global_lock(&facs->global_lock))
 
-#endif
-
 #endif /* _ASM_IA64_ACENV_H */
index 75dc59a793d61ce1f243526764056edd434330bf..a1d91ab4c5ef2010edb46e0a54ea0f016600581f 100644 (file)
@@ -40,6 +40,11 @@ extern int acpi_lapic;
 #define acpi_noirq 0   /* ACPI always enabled on IA64 */
 #define acpi_pci_disabled 0 /* ACPI PCI always enabled on IA64 */
 #define acpi_strict 1  /* no ACPI spec workarounds on IA64 */
+
+static inline bool acpi_has_cpu_in_madt(void)
+{
+       return !!acpi_lapic;
+}
 #endif
 #define acpi_processor_cstate_check(x) (x) /* no idle limits on IA64 :) */
 static inline void disable_acpi(void) { }
index a2fa297196bc19f1de4f021ebec6e5338b237313..f5645d6a89f2c9c79e8e22cd9a7201df6dc389df 100644 (file)
@@ -69,8 +69,6 @@
 #define SA_NOMASK      SA_NODEFER
 #define SA_ONESHOT     SA_RESETHAND
 
-#define SA_RESTORER    0x04000000 /* obsolete -- ignored */
-
 #define MINSIGSTKSZ    2048
 #define SIGSTKSZ       8192
 
index ae085ad0fba03827df21874edfb6a6985c3338f8..0bef864264c0bb1e0d445c184ef79e51c9153caf 100644 (file)
@@ -728,7 +728,6 @@ static void __init pagetable_init(void)
 #endif
 
        empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
-       memset(empty_zero_page, 0, PAGE_SIZE);
 }
 
 static void __init gateway_init(void)
index fefe7c8bf05f54725b2edb7acbcc801c95c6431c..80b94b0add1f494e600db71170a966c2271c5e58 100644 (file)
@@ -145,6 +145,7 @@ config PPC
        select HAVE_IRQ_EXIT_ON_IRQ_STACK
        select ARCH_USE_CMPXCHG_LOCKREF if PPC64
        select HAVE_ARCH_AUDITSYSCALL
+       select ARCH_SUPPORTS_ATOMIC_RMW
 
 config GENERIC_CSUM
        def_bool CPU_LITTLE_ENDIAN
index bc2347774f0ad4ed111a5c98546fb763de6d8bbd..0fdd7eece6d91a3183a5857967ad910998d33c09 100644 (file)
@@ -447,6 +447,7 @@ extern const char *powerpc_base_platform;
            CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \
            CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP)
 #define CPU_FTRS_POWER8E (CPU_FTRS_POWER8 | CPU_FTR_PMAO_BUG)
+#define CPU_FTRS_POWER8_DD1 (CPU_FTRS_POWER8 & ~CPU_FTR_DBELL)
 #define CPU_FTRS_CELL  (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
            CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
            CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
index fddb72b48ce9bd2a9a4e25460c2ff5cc4bc15ad5..d645428a65a411c187768bd296cc725e09fc2da6 100644 (file)
@@ -198,8 +198,10 @@ static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
        return rb;
 }
 
-static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
+static inline unsigned long __hpte_page_size(unsigned long h, unsigned long l,
+                                            bool is_base_size)
 {
+
        int size, a_psize;
        /* Look at the 8 bit LP value */
        unsigned int lp = (l >> LP_SHIFT) & ((1 << LP_BITS) - 1);
@@ -214,14 +216,27 @@ static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
                                continue;
 
                        a_psize = __hpte_actual_psize(lp, size);
-                       if (a_psize != -1)
+                       if (a_psize != -1) {
+                               if (is_base_size)
+                                       return 1ul << mmu_psize_defs[size].shift;
                                return 1ul << mmu_psize_defs[a_psize].shift;
+                       }
                }
 
        }
        return 0;
 }
 
+static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
+{
+       return __hpte_page_size(h, l, 0);
+}
+
+static inline unsigned long hpte_base_page_size(unsigned long h, unsigned long l)
+{
+       return __hpte_page_size(h, l, 1);
+}
+
 static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
 {
        return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
index 807014dde821058429b41a5d825759d50d4d9ed7..c2b4dcf23d03768427fb5a42a134b86d02e8b0e8 100644 (file)
@@ -22,6 +22,7 @@
  */
 #include <asm/pgtable-ppc64.h>
 #include <asm/bug.h>
+#include <asm/processor.h>
 
 /*
  * Segment table
@@ -496,7 +497,7 @@ extern void slb_set_size(u16 size);
  */
 struct subpage_prot_table {
        unsigned long maxaddr;  /* only addresses < this are protected */
-       unsigned int **protptrs[2];
+       unsigned int **protptrs[(TASK_SIZE_USER64 >> 43)];
        unsigned int *low_prot[4];
 };
 
index 9ea266eae33e235cde422eaaf8c86e9fc9275236..7e4612528546b710cb13d7b9346d394d5cd4d99e 100644 (file)
@@ -277,6 +277,8 @@ n:
        .globl n;       \
 n:
 
+#define _GLOBAL_TOC(name) _GLOBAL(name)
+
 #define _KPROBE(n)     \
        .section ".kprobes.text","a";   \
        .globl  n;      \
index 965291b4c2fa15a9f6f50cd8ca8b1e3a3067db02..0c157642c2a140a5be7cf26d677f5fae2fe05816 100644 (file)
@@ -527,6 +527,26 @@ static struct cpu_spec __initdata cpu_specs[] = {
                .machine_check_early    = __machine_check_early_realmode_p8,
                .platform               = "power8",
        },
+       {       /* Power8 DD1: Does not support doorbell IPIs */
+               .pvr_mask               = 0xffffff00,
+               .pvr_value              = 0x004d0100,
+               .cpu_name               = "POWER8 (raw)",
+               .cpu_features           = CPU_FTRS_POWER8_DD1,
+               .cpu_user_features      = COMMON_USER_POWER8,
+               .cpu_user_features2     = COMMON_USER2_POWER8,
+               .mmu_features           = MMU_FTRS_POWER8,
+               .icache_bsize           = 128,
+               .dcache_bsize           = 128,
+               .num_pmcs               = 6,
+               .pmc_type               = PPC_PMC_IBM,
+               .oprofile_cpu_type      = "ppc64/power8",
+               .oprofile_type          = PPC_OPROFILE_INVALID,
+               .cpu_setup              = __setup_cpu_power8,
+               .cpu_restore            = __restore_cpu_power8,
+               .flush_tlb              = __flush_tlb_power8,
+               .machine_check_early    = __machine_check_early_realmode_p8,
+               .platform               = "power8",
+       },
        {       /* Power8 */
                .pvr_mask               = 0xffff0000,
                .pvr_value              = 0x004d0000,
index 51a3ff78838aaf1eb6726e92cb871c128221eb2e..1007fb802e6b0436ac16a2595720fa505fb3d415 100644 (file)
@@ -747,7 +747,7 @@ int setup_profiling_timer(unsigned int multiplier)
 
 #ifdef CONFIG_SCHED_SMT
 /* cpumask of CPUs with asymetric SMT dependancy */
-static const int powerpc_smt_flags(void)
+static int powerpc_smt_flags(void)
 {
        int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
 
index 80561074078d01ca8d101f7997a55699e8589e3f..68468d695f12ab864281f19a3cfd872a6976fc8d 100644 (file)
@@ -1562,7 +1562,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
                                goto out;
                        }
                        if (!rma_setup && is_vrma_hpte(v)) {
-                               unsigned long psize = hpte_page_size(v, r);
+                               unsigned long psize = hpte_base_page_size(v, r);
                                unsigned long senc = slb_pgsize_encoding(psize);
                                unsigned long lpcr;
 
index 6e6224318c36aaf166e33e7c57c8a8e0dcd9544e..5a24d3c2b6b8ce9bb39f59f4c69733b00ca068f6 100644 (file)
@@ -814,13 +814,10 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
                        r = hpte[i+1];
 
                        /*
-                        * Check the HPTE again, including large page size
-                        * Since we don't currently allow any MPSS (mixed
-                        * page-size segment) page sizes, it is sufficient
-                        * to check against the actual page size.
+                        * Check the HPTE again, including base page size
                         */
                        if ((v & valid) && (v & mask) == val &&
-                           hpte_page_size(v, r) == (1ul << pshift))
+                           hpte_base_page_size(v, r) == (1ul << pshift))
                                /* Return with the HPTE still locked */
                                return (hash << 3) + (i >> 1);
 
index 868347ef09fd48bcf8bfd343becb49f6898887c5..558a67df8126434eb427c7ca24eebabb82676cac 100644 (file)
@@ -48,7 +48,7 @@
  *
  * LR = return address to continue at after eventually re-enabling MMU
  */
-_GLOBAL(kvmppc_hv_entry_trampoline)
+_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
        mflr    r0
        std     r0, PPC_LR_STKOFF(r1)
        stdu    r1, -112(r1)
index e2c29e381dc7096d13af053526e0d98cf951d08e..d044b8b7c69dd6b675de969d30cc0345d104ba96 100644 (file)
 #include <asm/exception-64s.h>
 
 #if defined(CONFIG_PPC_BOOK3S_64)
+#if defined(_CALL_ELF) && _CALL_ELF == 2
+#define FUNC(name)             name
+#else
 #define FUNC(name)             GLUE(.,name)
+#endif
 #define GET_SHADOW_VCPU(reg)    addi   reg, r13, PACA_SVCPU
 
 #elif defined(CONFIG_PPC_BOOK3S_32)
index 9eec675220e621e029eac8ecf2a5dabfff17df5a..16c4d88ba27df9ed3caba2e37e20d02a40407dbc 100644 (file)
 
 #if defined(CONFIG_PPC_BOOK3S_64)
 
+#if defined(_CALL_ELF) && _CALL_ELF == 2
+#define FUNC(name)             name
+#else
 #define FUNC(name)             GLUE(.,name)
+#endif
 
 #elif defined(CONFIG_PPC_BOOK3S_32)
 
@@ -146,7 +150,7 @@ kvmppc_handler_skip_ins:
  * On entry, r4 contains the guest shadow MSR
  * MSR.EE has to be 0 when calling this function
  */
-_GLOBAL(kvmppc_entry_trampoline)
+_GLOBAL_TOC(kvmppc_entry_trampoline)
        mfmsr   r5
        LOAD_REG_ADDR(r7, kvmppc_handler_trampoline_enter)
        toreal(r7)
index edb14ba992b34fa74f3e7cf11c1b9a8a1b01e921..ef27fbd5d9c54694beac4d1b400dc7d512db38da 100644 (file)
@@ -23,20 +23,20 @@ static void kvm_rtas_set_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
        u32 irq, server, priority;
        int rc;
 
-       if (args->nargs != 3 || args->nret != 1) {
+       if (be32_to_cpu(args->nargs) != 3 || be32_to_cpu(args->nret) != 1) {
                rc = -3;
                goto out;
        }
 
-       irq = args->args[0];
-       server = args->args[1];
-       priority = args->args[2];
+       irq = be32_to_cpu(args->args[0]);
+       server = be32_to_cpu(args->args[1]);
+       priority = be32_to_cpu(args->args[2]);
 
        rc = kvmppc_xics_set_xive(vcpu->kvm, irq, server, priority);
        if (rc)
                rc = -3;
 out:
-       args->rets[0] = rc;
+       args->rets[0] = cpu_to_be32(rc);
 }
 
 static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
@@ -44,12 +44,12 @@ static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
        u32 irq, server, priority;
        int rc;
 
-       if (args->nargs != 1 || args->nret != 3) {
+       if (be32_to_cpu(args->nargs) != 1 || be32_to_cpu(args->nret) != 3) {
                rc = -3;
                goto out;
        }
 
-       irq = args->args[0];
+       irq = be32_to_cpu(args->args[0]);
 
        server = priority = 0;
        rc = kvmppc_xics_get_xive(vcpu->kvm, irq, &server, &priority);
@@ -58,10 +58,10 @@ static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
                goto out;
        }
 
-       args->rets[1] = server;
-       args->rets[2] = priority;
+       args->rets[1] = cpu_to_be32(server);
+       args->rets[2] = cpu_to_be32(priority);
 out:
-       args->rets[0] = rc;
+       args->rets[0] = cpu_to_be32(rc);
 }
 
 static void kvm_rtas_int_off(struct kvm_vcpu *vcpu, struct rtas_args *args)
@@ -69,18 +69,18 @@ static void kvm_rtas_int_off(struct kvm_vcpu *vcpu, struct rtas_args *args)
        u32 irq;
        int rc;
 
-       if (args->nargs != 1 || args->nret != 1) {
+       if (be32_to_cpu(args->nargs) != 1 || be32_to_cpu(args->nret) != 1) {
                rc = -3;
                goto out;
        }
 
-       irq = args->args[0];
+       irq = be32_to_cpu(args->args[0]);
 
        rc = kvmppc_xics_int_off(vcpu->kvm, irq);
        if (rc)
                rc = -3;
 out:
-       args->rets[0] = rc;
+       args->rets[0] = cpu_to_be32(rc);
 }
 
 static void kvm_rtas_int_on(struct kvm_vcpu *vcpu, struct rtas_args *args)
@@ -88,18 +88,18 @@ static void kvm_rtas_int_on(struct kvm_vcpu *vcpu, struct rtas_args *args)
        u32 irq;
        int rc;
 
-       if (args->nargs != 1 || args->nret != 1) {
+       if (be32_to_cpu(args->nargs) != 1 || be32_to_cpu(args->nret) != 1) {
                rc = -3;
                goto out;
        }
 
-       irq = args->args[0];
+       irq = be32_to_cpu(args->args[0]);
 
        rc = kvmppc_xics_int_on(vcpu->kvm, irq);
        if (rc)
                rc = -3;
 out:
-       args->rets[0] = rc;
+       args->rets[0] = cpu_to_be32(rc);
 }
 #endif /* CONFIG_KVM_XICS */
 
@@ -205,32 +205,6 @@ int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp)
        return rc;
 }
 
-static void kvmppc_rtas_swap_endian_in(struct rtas_args *args)
-{
-#ifdef __LITTLE_ENDIAN__
-       int i;
-
-       args->token = be32_to_cpu(args->token);
-       args->nargs = be32_to_cpu(args->nargs);
-       args->nret = be32_to_cpu(args->nret);
-       for (i = 0; i < args->nargs; i++)
-               args->args[i] = be32_to_cpu(args->args[i]);
-#endif
-}
-
-static void kvmppc_rtas_swap_endian_out(struct rtas_args *args)
-{
-#ifdef __LITTLE_ENDIAN__
-       int i;
-
-       for (i = 0; i < args->nret; i++)
-               args->args[i] = cpu_to_be32(args->args[i]);
-       args->token = cpu_to_be32(args->token);
-       args->nargs = cpu_to_be32(args->nargs);
-       args->nret = cpu_to_be32(args->nret);
-#endif
-}
-
 int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
 {
        struct rtas_token_definition *d;
@@ -249,8 +223,6 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
        if (rc)
                goto fail;
 
-       kvmppc_rtas_swap_endian_in(&args);
-
        /*
         * args->rets is a pointer into args->args. Now that we've
         * copied args we need to fix it up to point into our copy,
@@ -258,13 +230,13 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
         * value so we can restore it on the way out.
         */
        orig_rets = args.rets;
-       args.rets = &args.args[args.nargs];
+       args.rets = &args.args[be32_to_cpu(args.nargs)];
 
        mutex_lock(&vcpu->kvm->lock);
 
        rc = -ENOENT;
        list_for_each_entry(d, &vcpu->kvm->arch.rtas_tokens, list) {
-               if (d->token == args.token) {
+               if (d->token == be32_to_cpu(args.token)) {
                        d->handler->handler(vcpu, &args);
                        rc = 0;
                        break;
@@ -275,7 +247,6 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
 
        if (rc == 0) {
                args.rets = orig_rets;
-               kvmppc_rtas_swap_endian_out(&args);
                rc = kvm_write_guest(vcpu->kvm, args_phys, &args, sizeof(args));
                if (rc)
                        goto fail;
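
The hunks above drop the kvmppc_rtas_swap_endian_in/out helpers and instead convert each rtas_args field right where it is read or written, with be32_to_cpu() on input and cpu_to_be32() on output. A minimal sketch of that point-of-use pattern, using a hypothetical handler and argument block rather than the kernel's actual structures:

    #include <linux/types.h>
    #include <asm/byteorder.h>

    /* Hypothetical big-endian argument block, as a guest would supply it. */
    struct rtas_args_sketch {
            __be32 token;
            __be32 nargs;
            __be32 nret;
            __be32 args[16];
            __be32 *rets;
    };

    static void sketch_handler(struct rtas_args_sketch *a)
    {
            int rc = 0;
            u32 irq;

            /* Convert on read: guest fields are big-endian. */
            if (be32_to_cpu(a->nargs) != 1 || be32_to_cpu(a->nret) != 1) {
                    rc = -3;
                    goto out;
            }
            irq = be32_to_cpu(a->args[0]);
            (void)irq;              /* ... act on irq here ... */
    out:
            /* Convert on write: results go back to the guest big-endian. */
            a->rets[0] = cpu_to_be32(rc);
    }
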
index dd2cc03f406f9a0e1f844473ca92c5560eca6c67..86903d3f5a033d215b3857f979fcea2cb68a3de6 100644 (file)
@@ -473,7 +473,8 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
                if (printk_ratelimit())
                        pr_err("%s: pte not present: gfn %lx, pfn %lx\n",
                                __func__, (long)gfn, pfn);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto out;
        }
        kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
 
index 0738f96befbff76829e1119a5feefc915be39da8..43435c6892fb05d4ac2a732d30eef6e58b4bfaf8 100644 (file)
@@ -77,7 +77,7 @@ _GLOBAL(memset)
        stb     r4,0(r6)
        blr
 
-_GLOBAL(memmove)
+_GLOBAL_TOC(memmove)
        cmplw   0,r3,r4
        bgt     backwards_memcpy
        b       memcpy
index 412dd46dd0b7ea7136af14e8559e5d9b78d2c08a..5c09f365c84276161b75fd524fe270b2d43a1db2 100644 (file)
@@ -1198,7 +1198,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
                        sh = regs->gpr[rb] & 0x3f;
                        ival = (signed int) regs->gpr[rd];
                        regs->gpr[ra] = ival >> (sh < 32 ? sh : 31);
-                       if (ival < 0 && (sh >= 32 || (ival & ((1 << sh) - 1)) != 0))
+                       if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0))
                                regs->xer |= XER_CA;
                        else
                                regs->xer &= ~XER_CA;
@@ -1208,7 +1208,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
                        sh = rb;
                        ival = (signed int) regs->gpr[rd];
                        regs->gpr[ra] = ival >> sh;
-                       if (ival < 0 && (ival & ((1 << sh) - 1)) != 0)
+                       if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
                                regs->xer |= XER_CA;
                        else
                                regs->xer &= ~XER_CA;
@@ -1216,7 +1216,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
 
 #ifdef __powerpc64__
                case 27:        /* sld */
-                       sh = regs->gpr[rd] & 0x7f;
+                       sh = regs->gpr[rb] & 0x7f;
                        if (sh < 64)
                                regs->gpr[ra] = regs->gpr[rd] << sh;
                        else
@@ -1235,7 +1235,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
                        sh = regs->gpr[rb] & 0x7f;
                        ival = (signed long int) regs->gpr[rd];
                        regs->gpr[ra] = ival >> (sh < 64 ? sh : 63);
-                       if (ival < 0 && (sh >= 64 || (ival & ((1 << sh) - 1)) != 0))
+                       if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0))
                                regs->xer |= XER_CA;
                        else
                                regs->xer &= ~XER_CA;
@@ -1246,7 +1246,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
                        sh = rb | ((instr & 2) << 4);
                        ival = (signed long int) regs->gpr[rd];
                        regs->gpr[ra] = ival >> sh;
-                       if (ival < 0 && (ival & ((1 << sh) - 1)) != 0)
+                       if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
                                regs->xer |= XER_CA;
                        else
                                regs->xer &= ~XER_CA;
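
The emulate_step() fixes above widen the carry mask to 1ul << sh: with a plain int constant the shift is undefined for sh >= 32 and can never produce the wide masks the 64-bit srad forms need. A tiny illustration, assuming a 64-bit unsigned long:

    unsigned int sh = 40;

    /* Broken: 1 is an int, so "1 << 40" is undefined and cannot hold bit 40. */
    /* unsigned long bad_mask = (1 << sh) - 1; */

    /* Correct: promote to unsigned long before shifting. */
    unsigned long mask = (1ul << sh) - 1;   /* low 40 bits set */
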
index 6dcdadefd8d059a7c65207af5b71be761116947c..82e82cadcde56589361ce04c98d554bcf4faec7d 100644 (file)
@@ -390,12 +390,16 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
                case BPF_ANC | SKF_AD_VLAN_TAG:
                case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
+                       BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
+
                        PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
                                                          vlan_tci));
-                       if (code == (BPF_ANC | SKF_AD_VLAN_TAG))
-                               PPC_ANDI(r_A, r_A, VLAN_VID_MASK);
-                       else
+                       if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
+                               PPC_ANDI(r_A, r_A, ~VLAN_TAG_PRESENT);
+                       } else {
                                PPC_ANDI(r_A, r_A, VLAN_TAG_PRESENT);
+                               PPC_SRWI(r_A, r_A, 12);
+                       }
                        break;
                case BPF_ANC | SKF_AD_QUEUE:
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
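
The VLAN hunk makes the JIT match the interpreter: SKF_AD_VLAN_TAG returns the tag with the VLAN_TAG_PRESENT flag masked off, while SKF_AD_VLAN_TAG_PRESENT returns only that flag, shifted down by 12 because VLAN_TAG_PRESENT is 0x1000 (the new BUILD_BUG_ON pins that assumption). A hedged C sketch of the same masking, with a local stand-in for the flag:

    #define SKETCH_TAG_PRESENT 0x1000   /* assumption: mirrors VLAN_TAG_PRESENT */

    static unsigned int sketch_vlan_tag(unsigned short vlan_tci)
    {
            /* SKF_AD_VLAN_TAG: everything except the "present" flag */
            return vlan_tci & ~SKETCH_TAG_PRESENT;
    }

    static unsigned int sketch_vlan_tag_present(unsigned short vlan_tci)
    {
            /* SKF_AD_VLAN_TAG_PRESENT: the flag alone, normalised to 0/1 */
            return (vlan_tci & SKETCH_TAG_PRESENT) >> 12;
    }
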
index 022b38e6a80be83c62900419c027562600b0e02a..2d0b4d68a40a076f970fd458ab09d06785a43ff2 100644 (file)
@@ -86,6 +86,7 @@ static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa,
        }
 
        of_node_set_flag(dn, OF_DYNAMIC);
+       of_node_init(dn);
 
        return dn;
 }
index 0435bb65d0aaf616d9cf4257b15e242d18d5d58c..1c0a60d988678ebf1f80f5ddc30beaa3f1f40094 100644 (file)
@@ -69,6 +69,7 @@ static int pSeries_reconfig_add_node(const char *path, struct property *proplist
 
        np->properties = proplist;
        of_node_set_flag(np, OF_DYNAMIC);
+       of_node_init(np);
 
        np->parent = derive_parent(path);
        if (IS_ERR(np->parent)) {
index df38c70cd59ef295328068e432fa55b04e03651f..18ea9e3f8142954926b8fea3afe79ce90193a4b3 100644 (file)
@@ -51,8 +51,8 @@ static inline int restore_fp_ctl(u32 *fpc)
                return 0;
 
        asm volatile(
-               "0:     lfpc    %1\n"
-               "       la      %0,0\n"
+               "       lfpc    %1\n"
+               "0:     la      %0,0\n"
                "1:\n"
                EX_TABLE(0b,1b)
                : "=d" (rc) : "Q" (*fpc), "0" (-EINVAL));
index 7ba7d6784510c257bbadc72d8f9cdf58151b8e5b..e88d35d749501e43b296e8794351f691368dcf5e 100644 (file)
@@ -437,11 +437,11 @@ ENTRY(startup_kdump)
 
 #if defined(CONFIG_64BIT)
 #if defined(CONFIG_MARCH_ZEC12)
-       .long 3, 0xc100efea, 0xf46ce800, 0x00400000
+       .long 3, 0xc100eff2, 0xf46ce800, 0x00400000
 #elif defined(CONFIG_MARCH_Z196)
-       .long 2, 0xc100efea, 0xf46c0000
+       .long 2, 0xc100eff2, 0xf46c0000
 #elif defined(CONFIG_MARCH_Z10)
-       .long 2, 0xc100efea, 0xf0680000
+       .long 2, 0xc100eff2, 0xf0680000
 #elif defined(CONFIG_MARCH_Z9_109)
        .long 1, 0xc100efc2
 #elif defined(CONFIG_MARCH_Z990)
index 2d716734b5b1b2b482e580aee483d07b014c301f..5dc7ad9e2fbf2d1b035194b30db18a82957c1358 100644 (file)
@@ -334,9 +334,14 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
                        unsigned long mask = PSW_MASK_USER;
 
                        mask |= is_ri_task(child) ? PSW_MASK_RI : 0;
-                       if ((data & ~mask) != PSW_USER_BITS)
+                       if ((data ^ PSW_USER_BITS) & ~mask)
+                               /* Invalid psw mask. */
+                               return -EINVAL;
+                       if ((data & PSW_MASK_ASC) == PSW_ASC_HOME)
+                               /* Invalid address-space-control bits */
                                return -EINVAL;
                        if ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))
+                               /* Invalid addressing mode bits */
                                return -EINVAL;
                }
                *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;
@@ -672,9 +677,12 @@ static int __poke_user_compat(struct task_struct *child,
 
                        mask |= is_ri_task(child) ? PSW32_MASK_RI : 0;
                        /* Build a 64 bit psw mask from 31 bit mask. */
-                       if ((tmp & ~mask) != PSW32_USER_BITS)
+                       if ((tmp ^ PSW32_USER_BITS) & ~mask)
                                /* Invalid psw mask. */
                                return -EINVAL;
+                       if ((data & PSW32_MASK_ASC) == PSW32_ASC_HOME)
+                               /* Invalid address-space-control bits */
+                               return -EINVAL;
                        regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
                                (regs->psw.mask & PSW_MASK_BA) |
                                (__u64)(tmp & mask) << 32;
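
The new mask test (data ^ PSW_USER_BITS) & ~mask accepts any value whose bits outside the user-changeable mask match PSW_USER_BITS; the old (data & ~mask) != PSW_USER_BITS form only behaved that way when PSW_USER_BITS had no bits inside the mask. A small hedged illustration with made-up constants:

    /* Sketch: accept values whose fixed (non-user) bits match "required";
     * bits covered by user_mask may differ freely. */
    static int sketch_psw_valid(unsigned long data,
                                unsigned long required,
                                unsigned long user_mask)
    {
            return ((data ^ required) & ~user_mask) == 0;
    }

    /* With required = 0xA4 and user_mask = 0x0F:
     *   sketch_psw_valid(0xA7, 0xA4, 0x0F) -> 1 (only user bits differ)
     *   sketch_psw_valid(0xB7, 0xA4, 0x0F) -> 0 (a fixed bit differs)
     * The older form (data & ~user_mask) == required would reject both,
     * because required itself has a bit (0x04) inside user_mask. */
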
index 9ddc51eeb8d690410aa6fbf98e1a36a6d544e4db..30de42730b2f20096890cf4022023a4f1b292884 100644 (file)
 static LIST_HEAD(zpci_list);
 static DEFINE_SPINLOCK(zpci_list_lock);
 
-static void zpci_enable_irq(struct irq_data *data);
-static void zpci_disable_irq(struct irq_data *data);
-
 static struct irq_chip zpci_irq_chip = {
        .name = "zPCI",
-       .irq_unmask = zpci_enable_irq,
-       .irq_mask = zpci_disable_irq,
+       .irq_unmask = unmask_msi_irq,
+       .irq_mask = mask_msi_irq,
 };
 
 static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES);
@@ -244,43 +241,6 @@ static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
        return rc;
 }
 
-static int zpci_msi_set_mask_bits(struct msi_desc *msi, u32 mask, u32 flag)
-{
-       int offset, pos;
-       u32 mask_bits;
-
-       if (msi->msi_attrib.is_msix) {
-               offset = msi->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
-                       PCI_MSIX_ENTRY_VECTOR_CTRL;
-               msi->masked = readl(msi->mask_base + offset);
-               writel(flag, msi->mask_base + offset);
-       } else if (msi->msi_attrib.maskbit) {
-               pos = (long) msi->mask_base;
-               pci_read_config_dword(msi->dev, pos, &mask_bits);
-               mask_bits &= ~(mask);
-               mask_bits |= flag & mask;
-               pci_write_config_dword(msi->dev, pos, mask_bits);
-       } else
-               return 0;
-
-       msi->msi_attrib.maskbit = !!flag;
-       return 1;
-}
-
-static void zpci_enable_irq(struct irq_data *data)
-{
-       struct msi_desc *msi = irq_get_msi_desc(data->irq);
-
-       zpci_msi_set_mask_bits(msi, 1, 0);
-}
-
-static void zpci_disable_irq(struct irq_data *data)
-{
-       struct msi_desc *msi = irq_get_msi_desc(data->irq);
-
-       zpci_msi_set_mask_bits(msi, 1, 1);
-}
-
 void pcibios_fixup_bus(struct pci_bus *bus)
 {
 }
@@ -487,7 +447,10 @@ void arch_teardown_msi_irqs(struct pci_dev *pdev)
 
        /* Release MSI interrupts */
        list_for_each_entry(msi, &pdev->msi_list, list) {
-               zpci_msi_set_mask_bits(msi, 1, 1);
+               if (msi->msi_attrib.is_msix)
+                       default_msix_mask_irq(msi, 1);
+               else
+                       default_msi_mask_irq(msi, 1, 1);
                irq_set_msi_desc(msi->irq, NULL);
                irq_free_desc(msi->irq);
                msi->msg.address_lo = 0;
index d4d16e4be07c246d5941c799e9f4cff5d091d702..bf5b3f5f496239740d0fc451e6c7806d2c2a59fc 100644 (file)
@@ -32,7 +32,8 @@ endif
 
 cflags-$(CONFIG_CPU_SH2)               := $(call cc-option,-m2,)
 cflags-$(CONFIG_CPU_SH2A)              += $(call cc-option,-m2a,) \
-                                          $(call cc-option,-m2a-nofpu,)
+                                          $(call cc-option,-m2a-nofpu,) \
+                                          $(call cc-option,-m4-nofpu,)
 cflags-$(CONFIG_CPU_SH3)               := $(call cc-option,-m3,)
 cflags-$(CONFIG_CPU_SH4)               := $(call cc-option,-m4,) \
        $(call cc-option,-mno-implicit-fp,-m4-nofpu)
index 29f2e988c56a9be4cb8824942f1226ebf958670f..407c87d9879ae872b490c1e6adf75d360d4a82f3 100644 (file)
@@ -78,6 +78,7 @@ config SPARC64
        select HAVE_C_RECORDMCOUNT
        select NO_BOOTMEM
        select HAVE_ARCH_AUDITSYSCALL
+       select ARCH_SUPPORTS_ATOMIC_RMW
 
 config ARCH_DEFCONFIG
        string
index b73274fb961a27a9b54eb5fd1719650762fef305..42f2bca1d338c231c63c6f976b647336459b1435 100644 (file)
 #define __NR_finit_module      342
 #define __NR_sched_setattr     343
 #define __NR_sched_getattr     344
+#define __NR_renameat2         345
 
-#define NR_syscalls            345
+#define NR_syscalls            346
 
 /* Bitmask values returned from kern_features system call.  */
 #define KERN_FEATURE_MIXED_MODE_STACK  0x00000001
index d066eb18650c1598f898f7a4314bdd7ed5b606a7..f834224208ed8ca73d0b12f9d5ad320efd293ca8 100644 (file)
@@ -48,6 +48,7 @@ SIGN1(sys32_futex, compat_sys_futex, %o1)
 SIGN1(sys32_recvfrom, compat_sys_recvfrom, %o0)
 SIGN1(sys32_recvmsg, compat_sys_recvmsg, %o0)
 SIGN1(sys32_sendmsg, compat_sys_sendmsg, %o0)
+SIGN2(sys32_renameat2, sys_renameat2, %o0, %o2)
 
        .globl          sys32_mmap2
 sys32_mmap2:
index 151ace8766cc2d99d5be3552a4b2014eb74fdaa6..85fe9b1087cdb3eae326a46337f5484ca6d9a636 100644 (file)
@@ -86,3 +86,4 @@ sys_call_table:
 /*330*/        .long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
 /*335*/        .long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
 /*340*/        .long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
+/*345*/        .long sys_renameat2
index 4bd4e2bb26cf4d35a8980f1a9040cdd88a645a4a..33ecba2826ea20b65693d90649a791db91fd4fcd 100644 (file)
@@ -87,6 +87,7 @@ sys_call_table32:
 /*330*/        .word compat_sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime
        .word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev
 /*340*/        .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
+       .word sys32_renameat2
 
 #endif /* CONFIG_COMPAT */
 
@@ -165,3 +166,4 @@ sys_call_table:
 /*330*/        .word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
        .word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
 /*340*/        .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
+       .word sys_renameat2
index 9472079471bbfbbac8e24632ae5b79444f47e646..f1b3eb14b855ccd8740b328085521c0b432407d2 100644 (file)
@@ -12,6 +12,7 @@
 #include <mem_user.h>
 #include <os.h>
 #include <skas.h>
+#include <kern_util.h>
 
 struct host_vm_change {
        struct host_vm_op {
@@ -124,6 +125,9 @@ static int add_munmap(unsigned long addr, unsigned long len,
        struct host_vm_op *last;
        int ret = 0;
 
+       if ((addr >= STUB_START) && (addr < STUB_END))
+               return -EINVAL;
+
        if (hvc->index != 0) {
                last = &hvc->ops[hvc->index - 1];
                if ((last->type == MUNMAP) &&
@@ -283,8 +287,11 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
        /* This is not an else because ret is modified above */
        if (ret) {
                printk(KERN_ERR "fix_range_common: failed, killing current "
-                      "process\n");
+                      "process: %d\n", task_tgid_vnr(current));
+               /* We are under mmap_sem, release it such that current can terminate */
+               up_write(&current->mm->mmap_sem);
                force_sig(SIGKILL, current);
+               do_signal();
        }
 }
 
index 974b87474a9900f1909f845d449eba90dc9b8338..5678c3571e7cb4d1572d0b16a91b0650f76095c7 100644 (file)
@@ -206,7 +206,7 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user,
        int is_write = FAULT_WRITE(fi);
        unsigned long address = FAULT_ADDRESS(fi);
 
-       if (regs)
+       if (!is_user && regs)
                current->thread.segv_regs = container_of(regs, struct pt_regs, regs);
 
        if (!is_user && (address >= start_vm) && (address < end_vm)) {
index d531879a4617695e02df3c1550922cfede7ea4e3..908579f2b0ab14cf464777c0a45a37eb849d341f 100644 (file)
@@ -54,7 +54,7 @@ static int ptrace_dump_regs(int pid)
 
 void wait_stub_done(int pid)
 {
-       int n, status, err, bad_stop = 0;
+       int n, status, err;
 
        while (1) {
                CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED | __WALL));
@@ -74,8 +74,6 @@ void wait_stub_done(int pid)
 
        if (((1 << WSTOPSIG(status)) & STUB_DONE_MASK) != 0)
                return;
-       else
-               bad_stop = 1;
 
 bad_wait:
        err = ptrace_dump_regs(pid);
@@ -85,10 +83,7 @@ bad_wait:
        printk(UM_KERN_ERR "wait_stub_done : failed to wait for SIGTRAP, "
               "pid = %d, n = %d, errno = %d, status = 0x%x\n", pid, n, errno,
               status);
-       if (bad_stop)
-               kill(pid, SIGKILL);
-       else
-               fatal_sigsegv();
+       fatal_sigsegv();
 }
 
 extern unsigned long current_stub_stack(void);
index a8f749ef0fdcc626a16d2a94e02c6a65d26a2fe8..70c43b5371bb57d5c3a3ec58f825b5abfa72f12b 100644 (file)
@@ -21,6 +21,7 @@ config X86_64
 ### Arch settings
 config X86
        def_bool y
+       select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI
        select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
        select ARCH_MIGHT_HAVE_PC_PARPORT
        select ARCH_MIGHT_HAVE_PC_SERIO
@@ -131,6 +132,8 @@ config X86
        select HAVE_CC_STACKPROTECTOR
        select GENERIC_CPU_AUTOPROBE
        select HAVE_ARCH_AUDITSYSCALL
+       select ARCH_SUPPORTS_ATOMIC_RMW
+       select ACPI_LEGACY_TABLES_LOOKUP if ACPI
 
 config INSTRUCTION_DECODER
        def_bool y
index 84c223479e3c9b9e3aa64ad25d4f633909f497dd..7a6d43a554d7789316c9980080eaad17026105ae 100644 (file)
@@ -91,10 +91,9 @@ bs_die:
 
        .section ".bsdata", "a"
 bugger_off_msg:
-       .ascii  "Direct floppy boot is not supported. "
-       .ascii  "Use a boot loader program instead.\r\n"
+       .ascii  "Use a boot loader.\r\n"
        .ascii  "\n"
-       .ascii  "Remove disk and press any key to reboot ...\r\n"
+       .ascii  "Remove disk and press any key to reboot...\r\n"
        .byte   0
 
 #ifdef CONFIG_EFI_STUB
@@ -108,7 +107,7 @@ coff_header:
 #else
        .word   0x8664                          # x86-64
 #endif
-       .word   3                               # nr_sections
+       .word   4                               # nr_sections
        .long   0                               # TimeDateStamp
        .long   0                               # PointerToSymbolTable
        .long   1                               # NumberOfSymbols
@@ -250,6 +249,25 @@ section_table:
        .word   0                               # NumberOfLineNumbers
        .long   0x60500020                      # Characteristics (section flags)
 
+       #
+       # The offset & size fields are filled in by build.c.
+       #
+       .ascii  ".bss"
+       .byte   0
+       .byte   0
+       .byte   0
+       .byte   0
+       .long   0
+       .long   0x0
+       .long   0                               # Size of initialized data
+                                               # on disk
+       .long   0x0
+       .long   0                               # PointerToRelocations
+       .long   0                               # PointerToLineNumbers
+       .word   0                               # NumberOfRelocations
+       .word   0                               # NumberOfLineNumbers
+       .long   0xc8000080                      # Characteristics (section flags)
+
 #endif /* CONFIG_EFI_STUB */
 
        # Kernel attributes; used by setup.  This is part 1 of the
index 1a2f2121cada2a11a1b273bdc518b1dcce34ea99..a7661c430cd98d28795ddee61bf5506c5ef3a1e5 100644 (file)
@@ -143,7 +143,7 @@ static void usage(void)
 
 #ifdef CONFIG_EFI_STUB
 
-static void update_pecoff_section_header(char *section_name, u32 offset, u32 size)
+static void update_pecoff_section_header_fields(char *section_name, u32 vma, u32 size, u32 datasz, u32 offset)
 {
        unsigned int pe_header;
        unsigned short num_sections;
@@ -164,10 +164,10 @@ static void update_pecoff_section_header(char *section_name, u32 offset, u32 siz
                        put_unaligned_le32(size, section + 0x8);
 
                        /* section header vma field */
-                       put_unaligned_le32(offset, section + 0xc);
+                       put_unaligned_le32(vma, section + 0xc);
 
                        /* section header 'size of initialised data' field */
-                       put_unaligned_le32(size, section + 0x10);
+                       put_unaligned_le32(datasz, section + 0x10);
 
                        /* section header 'file offset' field */
                        put_unaligned_le32(offset, section + 0x14);
@@ -179,6 +179,11 @@ static void update_pecoff_section_header(char *section_name, u32 offset, u32 siz
        }
 }
 
+static void update_pecoff_section_header(char *section_name, u32 offset, u32 size)
+{
+       update_pecoff_section_header_fields(section_name, offset, size, size, offset);
+}
+
 static void update_pecoff_setup_and_reloc(unsigned int size)
 {
        u32 setup_offset = 0x200;
@@ -203,9 +208,6 @@ static void update_pecoff_text(unsigned int text_start, unsigned int file_sz)
 
        pe_header = get_unaligned_le32(&buf[0x3c]);
 
-       /* Size of image */
-       put_unaligned_le32(file_sz, &buf[pe_header + 0x50]);
-
        /*
         * Size of code: Subtract the size of the first sector (512 bytes)
         * which includes the header.
@@ -220,6 +222,22 @@ static void update_pecoff_text(unsigned int text_start, unsigned int file_sz)
        update_pecoff_section_header(".text", text_start, text_sz);
 }
 
+static void update_pecoff_bss(unsigned int file_sz, unsigned int init_sz)
+{
+       unsigned int pe_header;
+       unsigned int bss_sz = init_sz - file_sz;
+
+       pe_header = get_unaligned_le32(&buf[0x3c]);
+
+       /* Size of uninitialized data */
+       put_unaligned_le32(bss_sz, &buf[pe_header + 0x24]);
+
+       /* Size of image */
+       put_unaligned_le32(init_sz, &buf[pe_header + 0x50]);
+
+       update_pecoff_section_header_fields(".bss", file_sz, bss_sz, 0, 0);
+}
+
 static int reserve_pecoff_reloc_section(int c)
 {
        /* Reserve 0x20 bytes for .reloc section */
@@ -259,6 +277,8 @@ static void efi_stub_entry_update(void)
 static inline void update_pecoff_setup_and_reloc(unsigned int size) {}
 static inline void update_pecoff_text(unsigned int text_start,
                                      unsigned int file_sz) {}
+static inline void update_pecoff_bss(unsigned int file_sz,
+                                    unsigned int init_sz) {}
 static inline void efi_stub_defaults(void) {}
 static inline void efi_stub_entry_update(void) {}
 
@@ -310,7 +330,7 @@ static void parse_zoffset(char *fname)
 
 int main(int argc, char ** argv)
 {
-       unsigned int i, sz, setup_sectors;
+       unsigned int i, sz, setup_sectors, init_sz;
        int c;
        u32 sys_size;
        struct stat sb;
@@ -376,7 +396,9 @@ int main(int argc, char ** argv)
        buf[0x1f1] = setup_sectors-1;
        put_unaligned_le32(sys_size, &buf[0x1f4]);
 
-       update_pecoff_text(setup_sectors * 512, sz + i + ((sys_size * 16) - sz));
+       update_pecoff_text(setup_sectors * 512, i + (sys_size * 16));
+       init_sz = get_unaligned_le32(&buf[0x260]);
+       update_pecoff_bss(i + (sys_size * 16), init_sz);
 
        efi_stub_entry_update();
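
update_pecoff_text() now records only the file-backed size, and the new update_pecoff_bss() accounts for the rest: the .bss section spans init_sz - file_sz bytes, stores no raw data in the file, and the PE SizeOfImage field is raised to init_sz so EFI loaders reserve the full in-memory footprint. A hedged sketch of that bookkeeping, using the same header offsets as the code above and example sizes only:

    /* Example numbers: file_sz is what is present in the bzImage,
     * init_sz is the total memory footprint from the setup header. */
    unsigned int file_sz = 4u << 20;          /* 4 MiB on disk */
    unsigned int init_sz = 16u << 20;         /* 16 MiB in memory */
    unsigned int bss_sz  = init_sz - file_sz; /* uninitialized tail */

    /* PE optional-header fields, relative to the PE signature:
     *   +0x24  SizeOfUninitializedData  <- bss_sz
     *   +0x50  SizeOfImage              <- init_sz
     * The .bss section header then gets vma = file_sz, virtual size = bss_sz,
     * raw-data size 0 and file offset 0, since nothing is stored on disk. */
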
 
index 66873297e9f5764d89cf1e57544c65b7f2d86570..1b010a859b8b4816c7b28decea1f853eb0b2b5b3 100644 (file)
@@ -18,8 +18,6 @@
 
 #define ACPI_FLUSH_CPU_CACHE() wbinvd()
 
-#ifdef CONFIG_ACPI
-
 int __acpi_acquire_global_lock(unsigned int *lock);
 int __acpi_release_global_lock(unsigned int *lock);
 
@@ -44,6 +42,4 @@ int __acpi_release_global_lock(unsigned int *lock);
            : "=r"(n_hi), "=r"(n_lo)    \
            : "0"(n_hi), "1"(n_lo))
 
-#endif
-
 #endif /* _ASM_X86_ACENV_H */
index e06225eda63597e4f23ef01c8099fb1b5cf45c41..0ab4f9fd268764114e3f252b07895c4bcaf63f90 100644 (file)
@@ -121,6 +121,11 @@ static inline void arch_acpi_set_pdc_bits(u32 *buf)
                buf[2] &= ~(ACPI_PDC_C_C2C3_FFH);
 }
 
+static inline bool acpi_has_cpu_in_madt(void)
+{
+       return !!acpi_lapic;
+}
+
 #else /* !CONFIG_ACPI */
 
 #define acpi_lapic 0
index f3a1f04ed4cb80794f539cb5e2fb1c3e7e803dd6..5848744514142c374174cdff06e3d7fe9f8cc793 100644 (file)
@@ -841,7 +841,6 @@ static int apm_do_idle(void)
        u32 eax;
        u8 ret = 0;
        int idled = 0;
-       int polling;
        int err = 0;
 
        if (!need_resched()) {
index a80029035bf2ae6acc5b4958df0dae905308d50b..f9e4fdd3b87736044840678a9fcf9afd6fc71b2f 100644 (file)
@@ -370,6 +370,17 @@ static void init_intel(struct cpuinfo_x86 *c)
         */
        detect_extended_topology(c);
 
+       if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
+               /*
+                * let's use the legacy cpuid vector 0x1 and 0x4 for topology
+                * detection.
+                */
+               c->x86_max_cores = intel_num_cpu_cores(c);
+#ifdef CONFIG_X86_32
+               detect_ht(c);
+#endif
+       }
+
        l2 = init_intel_cacheinfo(c);
        if (c->cpuid_level > 9) {
                unsigned eax = cpuid_eax(10);
@@ -438,17 +449,6 @@ static void init_intel(struct cpuinfo_x86 *c)
                set_cpu_cap(c, X86_FEATURE_P3);
 #endif
 
-       if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
-               /*
-                * let's use the legacy cpuid vector 0x1 and 0x4 for topology
-                * detection.
-                */
-               c->x86_max_cores = intel_num_cpu_cores(c);
-#ifdef CONFIG_X86_32
-               detect_ht(c);
-#endif
-       }
-
        /* Work around errata */
        srat_detect_node(c);
 
index a952e9c85b6fad81684c4bd39418bd5623a634ff..9c8f7394c612e7fa74d0ce356caa6d526dc02b0f 100644 (file)
@@ -730,6 +730,18 @@ unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c)
 #endif
        }
 
+#ifdef CONFIG_X86_HT
+       /*
+        * If cpu_llc_id is not yet set, this means cpuid_level < 4 which in
+        * turn means that the only possibility is SMT (as indicated in
+        * cpuid1). Since cpuid2 doesn't specify shared caches, and we know
+        * that SMT shares all caches, we can unconditionally set cpu_llc_id to
+        * c->phys_proc_id.
+        */
+       if (per_cpu(cpu_llc_id, cpu) == BAD_APICID)
+               per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
+#endif
+
        c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
 
        return l2;
index bb92f38153b210a05fc16520e4b0bc44e828537d..9a79c8dbd8e87956374829d7a390b35e9330287d 100644 (file)
@@ -2451,6 +2451,12 @@ static __init int mcheck_init_device(void)
        for_each_online_cpu(i) {
                err = mce_device_create(i);
                if (err) {
+                       /*
+                        * Register notifier anyway (and do not unreg it) so
+                        * that we don't leave undeleted timers, see notifier
+                        * callback above.
+                        */
+                       __register_hotcpu_notifier(&mce_cpu_notifier);
                        cpu_notifier_register_done();
                        goto err_device_create;
                }
@@ -2471,10 +2477,6 @@ static __init int mcheck_init_device(void)
 err_register:
        unregister_syscore_ops(&mce_syscore_ops);
 
-       cpu_notifier_register_begin();
-       __unregister_hotcpu_notifier(&mce_cpu_notifier);
-       cpu_notifier_register_done();
-
 err_device_create:
        /*
         * We didn't keep track of which devices were created above, but
index 2bdfbff8a4f6165afb1e9931edcfb250c7113a34..2879ecdaac430c62710db3c90b326e6e25f60acf 100644 (file)
@@ -118,6 +118,9 @@ static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
                        continue;
                if (event->attr.config1 & ~er->valid_mask)
                        return -EINVAL;
+               /* Check if the extra MSRs can be safely accessed */
+               if (!er->extra_msr_access)
+                       return -ENXIO;
 
                reg->idx = er->idx;
                reg->config = event->attr.config1;
index 3b2f9bdd974be198d0622e306ddbba427add2d25..8ade93111e0379fd79e0b421d29256b6c9b1b206 100644 (file)
@@ -295,14 +295,16 @@ struct extra_reg {
        u64                     config_mask;
        u64                     valid_mask;
        int                     idx;  /* per_xxx->regs[] reg index */
+       bool                    extra_msr_access;
 };
 
 #define EVENT_EXTRA_REG(e, ms, m, vm, i) {     \
-       .event = (e),           \
-       .msr = (ms),            \
-       .config_mask = (m),     \
-       .valid_mask = (vm),     \
-       .idx = EXTRA_REG_##i,   \
+       .event = (e),                   \
+       .msr = (ms),                    \
+       .config_mask = (m),             \
+       .valid_mask = (vm),             \
+       .idx = EXTRA_REG_##i,           \
+       .extra_msr_access = true,       \
        }
 
 #define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx)     \
index adb02aa62af5e310ff51ca720d1163b247489ccb..2502d0d9d246a1fe63c57070176b2bcc04cad3bb 100644 (file)
@@ -1381,6 +1381,15 @@ again:
 
        intel_pmu_lbr_read();
 
+       /*
+        * CondChgd bit 63 doesn't indicate any overflow status.
+        * Ignore and clear the bit.
+        */
+       if (__test_and_clear_bit(63, (unsigned long *)&status)) {
+               if (!status)
+                       goto done;
+       }
+
        /*
         * PEBS overflow sets bit 62 in the global status register
         */
@@ -2173,6 +2182,41 @@ static void intel_snb_check_microcode(void)
        }
 }
 
+/*
+ * Under certain circumstances, accessing certain MSRs may cause a #GP fault.
+ * This function tests whether the given MSR can be safely accessed.
+ */
+static bool check_msr(unsigned long msr, u64 mask)
+{
+       u64 val_old, val_new, val_tmp;
+
+       /*
+        * Read the current value, change it and read it back to see if it
+        * matches, this is needed to detect certain hardware emulators
+        * (qemu/kvm) that don't trap on the MSR access and always return 0s.
+        */
+       if (rdmsrl_safe(msr, &val_old))
+               return false;
+
+       /*
+        * Only change the bits which can be updated by wrmsrl.
+        */
+       val_tmp = val_old ^ mask;
+       if (wrmsrl_safe(msr, val_tmp) ||
+           rdmsrl_safe(msr, &val_new))
+               return false;
+
+       if (val_new != val_tmp)
+               return false;
+
+       /* At this point the MSR is known to be safely accessible.
+        * Restore the old value and return.
+        */
+       wrmsrl(msr, val_old);
+
+       return true;
+}
+
 static __init void intel_sandybridge_quirk(void)
 {
        x86_pmu.check_microcode = intel_snb_check_microcode;
@@ -2262,7 +2306,8 @@ __init int intel_pmu_init(void)
        union cpuid10_ebx ebx;
        struct event_constraint *c;
        unsigned int unused;
-       int version;
+       struct extra_reg *er;
+       int version, i;
 
        if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
                switch (boot_cpu_data.x86) {
@@ -2465,6 +2510,9 @@ __init int intel_pmu_init(void)
        case 62: /* IvyBridge EP */
                memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
+               /* dTLB-load-misses on IVB is different than SNB */
+               hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */
+
                memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
                       sizeof(hw_cache_extra_regs));
 
@@ -2565,6 +2613,34 @@ __init int intel_pmu_init(void)
                }
        }
 
+       /*
+        * Accessing LBR MSRs may cause a #GP under certain circumstances,
+        * e.g. KVM doesn't support the LBR MSRs.
+        * Check all LBR MSRs here.
+        * Disable LBR access if any LBR MSR can not be accessed.
+        */
+       if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
+               x86_pmu.lbr_nr = 0;
+       for (i = 0; i < x86_pmu.lbr_nr; i++) {
+               if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
+                     check_msr(x86_pmu.lbr_to + i, 0xffffUL)))
+                       x86_pmu.lbr_nr = 0;
+       }
+
+       /*
+        * Accessing extra MSRs may cause a #GP under certain circumstances,
+        * e.g. KVM doesn't support offcore events.
+        * Check all extra_regs here.
+        */
+       if (x86_pmu.extra_regs) {
+               for (er = x86_pmu.extra_regs; er->msr; er++) {
+                       er->extra_msr_access = check_msr(er->msr, 0x1ffUL);
+                       /* Disable LBR select mapping */
+                       if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
+                               x86_pmu.lbr_sel_map = NULL;
+               }
+       }
+
        /* Support full width counters using alternative MSR range */
        if (x86_pmu.intel_cap.full_width_write) {
                x86_pmu.max_period = x86_pmu.cntval_mask;
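
check_msr() above probes an MSR by reading it, flipping a couple of writable bits, reading it back, and restoring the original value; a #GP on any access, or a readback that never changes (typical of emulators that silently swallow the write), marks it unusable. A hedged usage sketch with a made-up MSR address, following the same pattern as the LBR and extra_regs checks and assuming check_msr() is visible in the same file:

    /* MY_SKETCH_MSR is an example address only, not a real feature MSR. */
    #define MY_SKETCH_MSR 0x01c9

    static bool my_sketch_msr_usable;

    static void my_sketch_init(void)
    {
            /* Toggle the low two bits, verify the readback, restore the
             * original value; failure means the MSR is absent or faked. */
            my_sketch_msr_usable = check_msr(MY_SKETCH_MSR, 0x3UL);
    }
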
index 980970cb744db6793a0f355f092db0e4a17b6ec5..696ade311ded7d01103323d159cff621b46d4a89 100644 (file)
@@ -311,9 +311,11 @@ static int alloc_bts_buffer(int cpu)
        if (!x86_pmu.bts)
                return 0;
 
-       buffer = kzalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL, node);
-       if (unlikely(!buffer))
+       buffer = kzalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_NOWARN, node);
+       if (unlikely(!buffer)) {
+               WARN_ONCE(1, "%s: BTS buffer allocation failure\n", __func__);
                return -ENOMEM;
+       }
 
        max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
        thresh = max / 16;
index 65bbbea38b9c9c0f7246a3c4fee4176dd529647b..ae6552a0701f25330bd0e7027e5767ac32cdfdcd 100644 (file)
@@ -550,16 +550,16 @@ static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
        SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
-       SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xc),
-       SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xc),
+       SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
+       SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
-       SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xc),
-       SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xc),
+       SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
+       SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
@@ -1222,6 +1222,7 @@ static struct extra_reg ivt_uncore_cbox_extra_regs[] = {
        SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
                                  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
        SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
+
        SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
        SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
@@ -1245,7 +1246,7 @@ static struct extra_reg ivt_uncore_cbox_extra_regs[] = {
        SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
-       SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
+       SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
index dbaa23e78b369721d5082af26cc18fa698985d09..0d0c9d4ab6d5b0d9cc1009ae03aebff22c1e8585 100644 (file)
@@ -425,8 +425,8 @@ sysenter_do_call:
        cmpl $(NR_syscalls), %eax
        jae sysenter_badsys
        call *sys_call_table(,%eax,4)
-       movl %eax,PT_EAX(%esp)
 sysenter_after_call:
+       movl %eax,PT_EAX(%esp)
        LOCKDEP_SYS_EXIT
        DISABLE_INTERRUPTS(CLBR_ANY)
        TRACE_IRQS_OFF
@@ -502,6 +502,7 @@ ENTRY(system_call)
        jae syscall_badsys
 syscall_call:
        call *sys_call_table(,%eax,4)
+syscall_after_call:
        movl %eax,PT_EAX(%esp)          # store the return value
 syscall_exit:
        LOCKDEP_SYS_EXIT
@@ -675,12 +676,12 @@ syscall_fault:
 END(syscall_fault)
 
 syscall_badsys:
-       movl $-ENOSYS,PT_EAX(%esp)
-       jmp syscall_exit
+       movl $-ENOSYS,%eax
+       jmp syscall_after_call
 END(syscall_badsys)
 
 sysenter_badsys:
-       movl $-ENOSYS,PT_EAX(%esp)
+       movl $-ENOSYS,%eax
        jmp sysenter_after_call
 END(syscall_badsys)
        CFI_ENDPROC
index 6afbb16e9b794819e23e2e284e06206f59bc92c3..94d857fb103396cec7eb62981a86ce7ed43f5001 100644 (file)
@@ -175,7 +175,7 @@ void init_espfix_ap(void)
        if (!pud_present(pud)) {
                pmd_p = (pmd_t *)__get_free_page(PGALLOC_GFP);
                pud = __pud(__pa(pmd_p) | (PGTABLE_PROT & ptemask));
-               paravirt_alloc_pud(&init_mm, __pa(pmd_p) >> PAGE_SHIFT);
+               paravirt_alloc_pmd(&init_mm, __pa(pmd_p) >> PAGE_SHIFT);
                for (n = 0; n < ESPFIX_PUD_CLONES; n++)
                        set_pud(&pud_p[n], pud);
        }
@@ -185,7 +185,7 @@ void init_espfix_ap(void)
        if (!pmd_present(pmd)) {
                pte_p = (pte_t *)__get_free_page(PGALLOC_GFP);
                pmd = __pmd(__pa(pte_p) | (PGTABLE_PROT & ptemask));
-               paravirt_alloc_pmd(&init_mm, __pa(pte_p) >> PAGE_SHIFT);
+               paravirt_alloc_pte(&init_mm, __pa(pte_p) >> PAGE_SHIFT);
                for (n = 0; n < ESPFIX_PMD_CLONES; n++)
                        set_pmd(&pmd_p[n], pmd);
        }
@@ -193,7 +193,6 @@ void init_espfix_ap(void)
        pte_p = pte_offset_kernel(&pmd, addr);
        stack_page = (void *)__get_free_page(GFP_KERNEL);
        pte = __pte(__pa(stack_page) | (__PAGE_KERNEL_RO & ptemask));
-       paravirt_alloc_pte(&init_mm, __pa(stack_page) >> PAGE_SHIFT);
        for (n = 0; n < ESPFIX_PTE_CLONES; n++)
                set_pte(&pte_p[n*PTE_STRIDE], pte);
 
index 7596df664901eed5a7aea5003ab83da49d34a615..67e6d19ef1be65e49a176a1c3bbd43cab654e7f5 100644 (file)
@@ -574,6 +574,9 @@ int kprobe_int3_handler(struct pt_regs *regs)
        struct kprobe *p;
        struct kprobe_ctlblk *kcb;
 
+       if (user_mode_vm(regs))
+               return 0;
+
        addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
        /*
         * We don't want to be preempted for the entire
index 57e5ce126d5af8ca7fdeae2145aae58805852103..ea030319b321edc254f58c8b08b31de2caeaf1aa 100644 (file)
@@ -920,9 +920,9 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
                tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
                if (!(freq->flags & CPUFREQ_CONST_LOOPS))
                        mark_tsc_unstable("cpufreq changes");
-       }
 
-       set_cyc2ns_scale(tsc_khz, freq->cpu);
+               set_cyc2ns_scale(tsc_khz, freq->cpu);
+       }
 
        return 0;
 }
index f6449334ec4514741f7f79481e419d30442d1ac2..ef432f891d30a69468a8093c2bd6f33ca1a7d54f 100644 (file)
@@ -5887,6 +5887,18 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
                        kvm_x86_ops->set_nmi(vcpu);
                }
        } else if (kvm_cpu_has_injectable_intr(vcpu)) {
+               /*
+                * Because interrupts can be injected asynchronously, we are
+                * calling check_nested_events again here to avoid a race condition.
+                * See https://lkml.org/lkml/2014/7/2/60 for discussion about this
+                * proposal and current concerns.  Perhaps we should be setting
+                * KVM_REQ_EVENT only on certain events and not unconditionally?
+                */
+               if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) {
+                       r = kvm_x86_ops->check_nested_events(vcpu, req_int_win);
+                       if (r != 0)
+                               return r;
+               }
                if (kvm_x86_ops->interrupt_allowed(vcpu)) {
                        kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
                                            false);
index f9e1ec346e359c7410482c5d8dfd5a9de88bfafc..8453e6e398951b0d1864b6e570c8d7d63e45d356 100644 (file)
@@ -376,38 +376,42 @@ _DoubleExceptionVector_WindowOverflow:
        beqz    a2, 1f          # if at start of vector, don't restore
 
        addi    a0, a0, -128
-       bbsi    a0, 8, 1f       # don't restore except for overflow 8 and 12
-       bbsi    a0, 7, 2f
+       bbsi.l  a0, 8, 1f       # don't restore except for overflow 8 and 12
+
+       /*
+        * This fixup handler is for the extremely unlikely case where the
+        * overflow handler's reference thru a0 gets a hardware TLB refill
+        * that bumps out the (distinct, aliasing) TLB entry that mapped its
+        * prior references thru a9/a13, and where our reference now thru
+        * a9/a13 gets a 2nd-level miss exception (not hardware TLB refill).
+        */
+       movi    a2, window_overflow_restore_a0_fixup
+       s32i    a2, a3, EXC_TABLE_FIXUP
+       l32i    a2, a3, EXC_TABLE_DOUBLE_SAVE
+       xsr     a3, excsave1
+
+       bbsi.l  a0, 7, 2f
 
        /*
         * Restore a0 as saved by _WindowOverflow8().
-        *
-        * FIXME:  we really need a fixup handler for this L32E,
-        * for the extremely unlikely case where the overflow handler's
-        * reference thru a0 gets a hardware TLB refill that bumps out
-        * the (distinct, aliasing) TLB entry that mapped its prior
-        * references thru a9, and where our reference now thru a9
-        * gets a 2nd-level miss exception (not hardware TLB refill).
         */
 
-       l32e    a2, a9, -16
-       wsr     a2, depc        # replace the saved a0
-       j       1f
+       l32e    a0, a9, -16
+       wsr     a0, depc        # replace the saved a0
+       j       3f
 
 2:
        /*
         * Restore a0 as saved by _WindowOverflow12().
-        *
-        * FIXME:  we really need a fixup handler for this L32E,
-        * for the extremely unlikely case where the overflow handler's
-        * reference thru a0 gets a hardware TLB refill that bumps out
-        * the (distinct, aliasing) TLB entry that mapped its prior
-        * references thru a13, and where our reference now thru a13
-        * gets a 2nd-level miss exception (not hardware TLB refill).
         */
 
-       l32e    a2, a13, -16
-       wsr     a2, depc        # replace the saved a0
+       l32e    a0, a13, -16
+       wsr     a0, depc        # replace the saved a0
+3:
+       xsr     a3, excsave1
+       movi    a0, 0
+       s32i    a0, a3, EXC_TABLE_FIXUP
+       s32i    a2, a3, EXC_TABLE_DOUBLE_SAVE
 1:
        /*
         * Restore WindowBase while leaving all address registers restored.
@@ -449,6 +453,7 @@ _DoubleExceptionVector_WindowOverflow:
 
        s32i    a0, a2, PT_DEPC
 
+_DoubleExceptionVector_handle_exception:
        addx4   a0, a0, a3
        l32i    a0, a0, EXC_TABLE_FAST_USER
        xsr     a3, excsave1
@@ -464,10 +469,119 @@ _DoubleExceptionVector_WindowOverflow:
        rotw    -3
        j       1b
 
-       .end literal_prefix
 
 ENDPROC(_DoubleExceptionVector)
 
+/*
+ * Fixup handler for TLB miss in double exception handler for window overflow.
+ * We get here with windowbase set to the window that was being spilled and
+ * a0 trashed. a0 bit 7 determines if this is a call8 (bit clear) or call12
+ * (bit set) window.
+ *
+ * We do the following here:
+ * - go to the original window retaining a0 value;
+ * - set up exception stack to return back to appropriate a0 restore code
+ *   (we'll need to rotate window back and there's no place to save this
+ *    information, use different return address for that);
+ * - handle the exception;
+ * - go to the window that was being spilled;
+ * - set up window_overflow_restore_a0_fixup as a fixup routine;
+ * - reload a0;
+ * - restore the original window;
+ * - reset the default fixup routine;
+ * - return to user. By the time we get to this fixup handler all information
+ *   about the conditions of the original double exception that happened in
+ *   the window overflow handler is lost, so we just return to userspace to
+ *   retry overflow from start.
+ *
+ * a0: value of depc, original value in depc
+ * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
+ * a3: exctable, original value in excsave1
+ */
+
+ENTRY(window_overflow_restore_a0_fixup)
+
+       rsr     a0, ps
+       extui   a0, a0, PS_OWB_SHIFT, PS_OWB_WIDTH
+       rsr     a2, windowbase
+       sub     a0, a2, a0
+       extui   a0, a0, 0, 3
+       l32i    a2, a3, EXC_TABLE_DOUBLE_SAVE
+       xsr     a3, excsave1
+
+       _beqi   a0, 1, .Lhandle_1
+       _beqi   a0, 3, .Lhandle_3
+
+       .macro  overflow_fixup_handle_exception_pane n
+
+       rsr     a0, depc
+       rotw    -\n
+
+       xsr     a3, excsave1
+       wsr     a2, depc
+       l32i    a2, a3, EXC_TABLE_KSTK
+       s32i    a0, a2, PT_AREG0
+
+       movi    a0, .Lrestore_\n
+       s32i    a0, a2, PT_DEPC
+       rsr     a0, exccause
+       j       _DoubleExceptionVector_handle_exception
+
+       .endm
+
+       overflow_fixup_handle_exception_pane 2
+.Lhandle_1:
+       overflow_fixup_handle_exception_pane 1
+.Lhandle_3:
+       overflow_fixup_handle_exception_pane 3
+
+       .macro  overflow_fixup_restore_a0_pane n
+
+       rotw    \n
+       /* Need to preserve a0 value here to be able to handle exception
+        * that may occur on a0 reload from stack. It may occur because
+        * TLB miss handler may not be atomic and pointer to page table
+        * may be lost before we get here. There are no free registers,
+        * so we need to use EXC_TABLE_DOUBLE_SAVE area.
+        */
+       xsr     a3, excsave1
+       s32i    a2, a3, EXC_TABLE_DOUBLE_SAVE
+       movi    a2, window_overflow_restore_a0_fixup
+       s32i    a2, a3, EXC_TABLE_FIXUP
+       l32i    a2, a3, EXC_TABLE_DOUBLE_SAVE
+       xsr     a3, excsave1
+       bbsi.l  a0, 7, 1f
+       l32e    a0, a9, -16
+       j       2f
+1:
+       l32e    a0, a13, -16
+2:
+       rotw    -\n
+
+       .endm
+
+.Lrestore_2:
+       overflow_fixup_restore_a0_pane 2
+
+.Lset_default_fixup:
+       xsr     a3, excsave1
+       s32i    a2, a3, EXC_TABLE_DOUBLE_SAVE
+       movi    a2, 0
+       s32i    a2, a3, EXC_TABLE_FIXUP
+       l32i    a2, a3, EXC_TABLE_DOUBLE_SAVE
+       xsr     a3, excsave1
+       rfe
+
+.Lrestore_1:
+       overflow_fixup_restore_a0_pane 1
+       j       .Lset_default_fixup
+.Lrestore_3:
+       overflow_fixup_restore_a0_pane 3
+       j       .Lset_default_fixup
+
+ENDPROC(window_overflow_restore_a0_fixup)
+
+       .end literal_prefix
 /*
  * Debug interrupt vector
  *
index ee32c0085dff4296fb9290e3706733db9402460d..d16db6df86f8e3d823ac2f189816d9889249323e 100644 (file)
@@ -269,13 +269,13 @@ SECTIONS
                  .UserExceptionVector.literal)
   SECTION_VECTOR (_DoubleExceptionVector_literal,
                  .DoubleExceptionVector.literal,
-                 DOUBLEEXC_VECTOR_VADDR - 16,
+                 DOUBLEEXC_VECTOR_VADDR - 40,
                  SIZEOF(.UserExceptionVector.text),
                  .UserExceptionVector.text)
   SECTION_VECTOR (_DoubleExceptionVector_text,
                  .DoubleExceptionVector.text,
                  DOUBLEEXC_VECTOR_VADDR,
-                 32,
+                 40,
                  .DoubleExceptionVector.literal)
 
   . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3;
index 4224256bb215f17c52d91662f186ecb250dee361..77ed20209ca57232dd79afa60fe91210515746bf 100644 (file)
@@ -191,7 +191,7 @@ int __init mem_reserve(unsigned long start, unsigned long end, int must_exist)
                return -EINVAL;
        }
 
-       if (it && start - it->start < bank_sz) {
+       if (it && start - it->start <= bank_sz) {
                if (start == it->start) {
                        if (end - it->start < bank_sz) {
                                it->start = end;
index b9f4cc494ecefbf2560483bdc4685bb98d59b5d3..28d227c5ca7781aed5bb15cf37d3b0f1dcaa8d79 100644 (file)
@@ -872,6 +872,13 @@ void blkcg_drain_queue(struct request_queue *q)
 {
        lockdep_assert_held(q->queue_lock);
 
+       /*
+        * @q could be exiting and already have destroyed all blkgs as
+        * indicated by NULL root_blkg.  If so, don't confuse policies.
+        */
+       if (!q->root_blkg)
+               return;
+
        blk_throtl_drain(q);
 }
 
index 3f33d86722688a4f50ac0064488540d1960447fb..a185b86741e5ff80dccbc5223eb570f764199d86 100644 (file)
@@ -27,18 +27,15 @@ struct request *blk_queue_find_tag(struct request_queue *q, int tag)
 EXPORT_SYMBOL(blk_queue_find_tag);
 
 /**
- * __blk_free_tags - release a given set of tag maintenance info
+ * blk_free_tags - release a given set of tag maintenance info
  * @bqt:       the tag map to free
  *
- * Tries to free the specified @bqt.  Returns true if it was
- * actually freed and false if there are still references using it
+ * Drops the reference count on @bqt and frees it when the last reference
+ * is dropped.
  */
-static int __blk_free_tags(struct blk_queue_tag *bqt)
+void blk_free_tags(struct blk_queue_tag *bqt)
 {
-       int retval;
-
-       retval = atomic_dec_and_test(&bqt->refcnt);
-       if (retval) {
+       if (atomic_dec_and_test(&bqt->refcnt)) {
                BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) <
                                                        bqt->max_depth);
 
@@ -50,9 +47,8 @@ static int __blk_free_tags(struct blk_queue_tag *bqt)
 
                kfree(bqt);
        }
-
-       return retval;
 }
+EXPORT_SYMBOL(blk_free_tags);
 
 /**
  * __blk_queue_free_tags - release tag maintenance info
@@ -69,27 +65,12 @@ void __blk_queue_free_tags(struct request_queue *q)
        if (!bqt)
                return;
 
-       __blk_free_tags(bqt);
+       blk_free_tags(bqt);
 
        q->queue_tags = NULL;
        queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
 }
 
-/**
- * blk_free_tags - release a given set of tag maintenance info
- * @bqt:       the tag map to free
- *
- * For externally managed @bqt frees the map.  Callers of this
- * function must guarantee to have released all the queues that
- * might have been using this tag map.
- */
-void blk_free_tags(struct blk_queue_tag *bqt)
-{
-       if (unlikely(!__blk_free_tags(bqt)))
-               BUG();
-}
-EXPORT_SYMBOL(blk_free_tags);
-
 /**
  * blk_queue_free_tags - release tag maintenance info
  * @q:  the request queue for the device
index fbd5a67cb773886104cac49698ee589b8fbc9933..a0926a6094b28a7e4e67b3a88afc993719294405 100644 (file)
@@ -690,6 +690,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
        case BLKROSET:
        case BLKDISCARD:
        case BLKSECDISCARD:
+       case BLKZEROOUT:
        /*
         * the ones below are implemented in blkdev_locked_ioctl,
         * but we call blkdev_ioctl, which gets the lock for us
index a34a22841002495713a74f960dea482ecee8dc11..3f5f745bbbea80327086e7391152b3a283733188 100644 (file)
@@ -42,6 +42,12 @@ menuconfig ACPI
 
 if ACPI
 
+config ACPI_LEGACY_TABLES_LOOKUP
+       bool
+
+config ARCH_MIGHT_HAVE_ACPI_PDC
+       bool
+
 config ACPI_SLEEP
        bool
        depends on SUSPEND || HIBERNATION
index ea55e0179f817c6331149315a6c7f3245e84de2d..505d4d79fe3e4ce74a631f487ce243e6ae7decb3 100644 (file)
@@ -36,6 +36,7 @@ acpi-y                                += scan.o
 acpi-y                         += resource.o
 acpi-y                         += acpi_processor.o
 acpi-y                         += processor_core.o
+acpi-$(CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC) += processor_pdc.o
 acpi-y                         += ec.o
 acpi-$(CONFIG_ACPI_DOCK)       += dock.o
 acpi-y                         += pci_root.o pci_link.o pci_irq.o
index 185334114d71005e649f10fd04acfcd4b7bf14ef..340d09518f8e986a56bffd4dc61291fbcb6e0b87 100644 (file)
@@ -69,11 +69,11 @@ static u32 l1_percpu_entry;
 #define ELOG_ENTRY_ADDR(phyaddr) \
        (phyaddr - elog_base + (u8 *)elog_addr)
 
-static struct acpi_generic_status *extlog_elog_entry_check(int cpu, int bank)
+static struct acpi_hest_generic_status *extlog_elog_entry_check(int cpu, int bank)
 {
        int idx;
        u64 data;
-       struct acpi_generic_status *estatus;
+       struct acpi_hest_generic_status *estatus;
 
        WARN_ON(cpu < 0);
        idx = ELOG_IDX(cpu, bank);
@@ -82,7 +82,7 @@ static struct acpi_generic_status *extlog_elog_entry_check(int cpu, int bank)
                return NULL;
 
        data &= EXT_ELOG_ENTRY_MASK;
-       estatus = (struct acpi_generic_status *)ELOG_ENTRY_ADDR(data);
+       estatus = (struct acpi_hest_generic_status *)ELOG_ENTRY_ADDR(data);
 
        /* if no valid data in elog entry, just return */
        if (estatus->block_status == 0)
@@ -92,7 +92,7 @@ static struct acpi_generic_status *extlog_elog_entry_check(int cpu, int bank)
 }
 
 static void __print_extlog_rcd(const char *pfx,
-                              struct acpi_generic_status *estatus, int cpu)
+                              struct acpi_hest_generic_status *estatus, int cpu)
 {
        static atomic_t seqno;
        unsigned int curr_seqno;
@@ -111,7 +111,7 @@ static void __print_extlog_rcd(const char *pfx,
 }
 
 static int print_extlog_rcd(const char *pfx,
-                           struct acpi_generic_status *estatus, int cpu)
+                           struct acpi_hest_generic_status *estatus, int cpu)
 {
        /* Not more than 2 messages every 5 seconds */
        static DEFINE_RATELIMIT_STATE(ratelimit_corrected, 5*HZ, 2);
@@ -137,7 +137,7 @@ static int extlog_print(struct notifier_block *nb, unsigned long val,
        struct mce *mce = (struct mce *)data;
        int     bank = mce->bank;
        int     cpu = mce->extcpu;
-       struct acpi_generic_status *estatus;
+       struct acpi_hest_generic_status *estatus;
        int rc;
 
        estatus = extlog_elog_entry_check(cpu, bank);
@@ -148,7 +148,7 @@ static int extlog_print(struct notifier_block *nb, unsigned long val,
        /* clear record status to enable BIOS to update it again */
        estatus->block_status = 0;
 
-       rc = print_extlog_rcd(NULL, (struct acpi_generic_status *)elog_buf, cpu);
+       rc = print_extlog_rcd(NULL, (struct acpi_hest_generic_status *)elog_buf, cpu);
 
        return NOTIFY_STOP;
 }
index 1c085742644faaef4f2da64bb1d7e857a7bd16f9..1fdf5e07a1c7cb0440594b12f8b78408c1c25bd4 100644 (file)
@@ -268,7 +268,7 @@ static int acpi_processor_get_info(struct acpi_device *device)
        pr->apic_id = apic_id;
 
        cpu_index = acpi_map_cpuid(pr->apic_id, pr->acpi_id);
-       if (!cpu0_initialized && !acpi_lapic) {
+       if (!cpu0_initialized && !acpi_has_cpu_in_madt()) {
                cpu0_initialized = 1;
                /* Handle UP system running SMP kernel, with no LAPIC in MADT */
                if ((cpu_index == -1) && (num_online_cpus() == 1))
index 8bb43f06e11fda07c6e71718fab7b2baa37a5a57..4be4cc94572d222a56960df60208fc7103d61b83 100644 (file)
@@ -2,7 +2,7 @@
 # Makefile for ACPICA Core interpreter
 #
 
-ccflags-y                      := -Os
+ccflags-y                      := -Os -DBUILDING_ACPICA
 ccflags-$(CONFIG_ACPI_DEBUG)   += -DACPI_DEBUG_OUTPUT
 
 # use acpi.o to put all files here into acpi.o modparam namespace
@@ -175,5 +175,5 @@ acpi-y +=           \
        utxferror.o     \
        utxfmutex.o
 
-acpi-$(ACPI_FUTURE_USAGE) += uttrack.o utcache.o
+acpi-$(ACPI_FUTURE_USAGE) += utfileio.o utprint.o uttrack.o utcache.o
 
index 8698ffba6f392dd0e4c8b7db4b918ce9f4796464..3d2c88289da9ff88a0dde088ee5a3aba30362131 100644 (file)
 /* Macros for usage messages */
 
 #define ACPI_USAGE_HEADER(usage) \
-       printf ("Usage: %s\nOptions:\n", usage);
+       acpi_os_printf ("Usage: %s\nOptions:\n", usage);
+
+#define ACPI_USAGE_TEXT(description) \
+       acpi_os_printf (description);
 
 #define ACPI_OPTION(name, description) \
-       printf ("  %-18s%s\n", name, description);
+       acpi_os_printf (" %-18s%s\n", name, description);
 
 #define FILE_SUFFIX_DISASSEMBLY     "dsl"
 #define ACPI_TABLE_FILE_SUFFIX      ".dat"
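
With printf() replaced by acpi_os_printf(), the usage macros can be shared by kernel-hosted and application builds. A hedged sketch of how a tool's usage routine might use them; the tool name and option strings below are made up for illustration:

```c
static void example_display_usage(void)
{
	ACPI_USAGE_HEADER("acpiexample [options] <file>");
	ACPI_OPTION("-h",        "Display this help message");
	ACPI_OPTION("-o <file>", "Redirect output to a file");
	ACPI_USAGE_TEXT("\nOptions above are illustrative only.\n");
}
```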
@@ -102,7 +105,7 @@ extern char *acpi_gbl_optarg;
 /*
  * cmfsize - Common get file size function
  */
-u32 cm_get_file_size(FILE * file);
+u32 cm_get_file_size(ACPI_FILE file);
 
 #ifndef ACPI_DUMP_APP
 /*
index 68a91eb0fa483f24b55218770c4bed079873d3e6..1d026ff1683f381fffaae26cfa9262d9e208416d 100644 (file)
@@ -233,9 +233,6 @@ acpi_status acpi_db_load_acpi_table(char *filename);
 acpi_status
 acpi_db_get_table_from_file(char *filename, struct acpi_table_header **table);
 
-acpi_status
-acpi_db_read_table_from_file(char *filename, struct acpi_table_header **table);
-
 /*
  * dbhistry - debugger HISTORY command
  */
index 115eedcade1e3d9ad652830936ac80c52ff42fec..ebf02cc10a430a9c52d99e3f245f20ce671ceab5 100644 (file)
@@ -297,7 +297,7 @@ ACPI_GLOBAL(u32, acpi_gbl_trace_dbg_layer);
  *
  ****************************************************************************/
 
-ACPI_GLOBAL(u8, acpi_gbl_db_output_flags);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_db_output_flags, ACPI_DB_CONSOLE_OUTPUT);
 
 #ifdef ACPI_DISASSEMBLER
 
@@ -362,6 +362,12 @@ ACPI_GLOBAL(u32, acpi_gbl_num_objects);
 #ifdef ACPI_APPLICATION
 
 ACPI_INIT_GLOBAL(ACPI_FILE, acpi_gbl_debug_file, NULL);
+ACPI_INIT_GLOBAL(ACPI_FILE, acpi_gbl_output_file, NULL);
+
+/* Print buffer */
+
+ACPI_GLOBAL(acpi_spinlock, acpi_gbl_print_lock);       /* For print buffer */
+ACPI_GLOBAL(char, acpi_gbl_print_buffer[1024]);
 
 #endif                         /* ACPI_APPLICATION */
 
index 1e256c5bda20675e73c1dbccd736cfc56606aebc..ed614f4b218273f8b000ebc017d699e0d09ef141 100644 (file)
@@ -95,7 +95,6 @@ extern const char *acpi_gbl_pt_decode[];
 #ifdef ACPI_ASL_COMPILER
 
 #include <stdio.h>
-extern FILE *acpi_gbl_output_file;
 
 #define ACPI_MSG_REDIRECT_BEGIN \
        FILE                            *output_file = acpi_gbl_output_file; \
@@ -211,6 +210,8 @@ void acpi_ut_subsystem_shutdown(void);
 
 acpi_size acpi_ut_strlen(const char *string);
 
+char *acpi_ut_strchr(const char *string, int ch);
+
 char *acpi_ut_strcpy(char *dst_string, const char *src_string);
 
 char *acpi_ut_strncpy(char *dst_string,
@@ -257,7 +258,7 @@ extern const u8 _acpi_ctype[];
 #define ACPI_IS_XDIGIT(c) (_acpi_ctype[(unsigned char)(c)] & (_ACPI_XD))
 #define ACPI_IS_UPPER(c)  (_acpi_ctype[(unsigned char)(c)] & (_ACPI_UP))
 #define ACPI_IS_LOWER(c)  (_acpi_ctype[(unsigned char)(c)] & (_ACPI_LO))
-#define ACPI_IS_PRINT(c)  (_acpi_ctype[(unsigned char)(c)] & (_ACPI_LO | _ACPI_UP | _ACPI_DI | _ACPI_SP | _ACPI_PU))
+#define ACPI_IS_PRINT(c)  (_acpi_ctype[(unsigned char)(c)] & (_ACPI_LO | _ACPI_UP | _ACPI_DI | _ACPI_XS | _ACPI_PU))
 #define ACPI_IS_ALPHA(c)  (_acpi_ctype[(unsigned char)(c)] & (_ACPI_LO | _ACPI_UP))
 
 #endif                         /* !ACPI_USE_SYSTEM_CLIBRARY */
@@ -352,6 +353,13 @@ acpi_ut_debug_dump_buffer(u8 *buffer, u32 count, u32 display, u32 component_id);
 
 void acpi_ut_dump_buffer(u8 *buffer, u32 count, u32 display, u32 offset);
 
+#ifdef ACPI_APPLICATION
+void
+acpi_ut_dump_buffer_to_file(ACPI_FILE file,
+                           u8 *buffer,
+                           u32 count, u32 display, u32 base_offset);
+#endif
+
 void acpi_ut_report_error(char *module_name, u32 line_number);
 
 void acpi_ut_report_info(char *module_name, u32 line_number);
@@ -393,6 +401,14 @@ acpi_ut_execute_power_methods(struct acpi_namespace_node *device_node,
                              const char **method_names,
                              u8 method_count, u8 *out_values);
 
+/*
+ * utfileio - file operations
+ */
+#ifdef ACPI_APPLICATION
+acpi_status
+acpi_ut_read_table_from_file(char *filename, struct acpi_table_header **table);
+#endif
+
 /*
  * utids - device ID support
  */
@@ -743,4 +759,23 @@ const struct ah_predefined_name *acpi_ah_match_predefined_name(char *nameseg);
 
 const struct ah_device_id *acpi_ah_match_hardware_id(char *hid);
 
+/*
+ * utprint - printf/vprintf output functions
+ */
+const char *acpi_ut_scan_number(const char *string, u64 *number_ptr);
+
+const char *acpi_ut_print_number(char *string, u64 number);
+
+int
+acpi_ut_vsnprintf(char *string,
+                 acpi_size size, const char *format, va_list args);
+
+int acpi_ut_snprintf(char *string, acpi_size size, const char *format, ...);
+
+#ifdef ACPI_APPLICATION
+int acpi_ut_file_vprintf(ACPI_FILE file, const char *format, va_list args);
+
+int acpi_ut_file_printf(ACPI_FILE file, const char *format, ...);
+#endif
+
 #endif                         /* _ACUTILS_H */
index 48f70013b488c7cdbb956b6ab316afe1c8396f03..e4ba4dec86af19ef447f3a4783c39015e558778f 100644 (file)
@@ -697,21 +697,6 @@ acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
                                              acpi_gbl_global_event_handler_context);
        }
 
-       /*
-        * If edge-triggered, clear the GPE status bit now. Note that
-        * level-triggered events are cleared after the GPE is serviced.
-        */
-       if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
-           ACPI_GPE_EDGE_TRIGGERED) {
-               status = acpi_hw_clear_gpe(gpe_event_info);
-               if (ACPI_FAILURE(status)) {
-                       ACPI_EXCEPTION((AE_INFO, status,
-                                       "Unable to clear GPE %02X",
-                                       gpe_number));
-                       return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
-               }
-       }
-
        /*
         * Always disable the GPE so that it does not keep firing before
         * any asynchronous activity completes (either from the execution
@@ -728,6 +713,23 @@ acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
                return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
        }
 
+       /*
+        * If edge-triggered, clear the GPE status bit now. Note that
+        * level-triggered events are cleared after the GPE is serviced.
+        */
+       if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
+           ACPI_GPE_EDGE_TRIGGERED) {
+               status = acpi_hw_clear_gpe(gpe_event_info);
+               if (ACPI_FAILURE(status)) {
+                       ACPI_EXCEPTION((AE_INFO, status,
+                                       "Unable to clear GPE %02X",
+                                       gpe_number));
+                       (void)acpi_hw_low_set_gpe(gpe_event_info,
+                                                 ACPI_GPE_CONDITIONAL_ENABLE);
+                       return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
+               }
+       }
+
        /*
         * Dispatch the GPE to either an installed handler or the control
         * method associated with this GPE (_Lxx or _Exx). If a handler
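
The hunk above reorders GPE dispatch so the GPE is disabled before an edge-triggered status bit is cleared, and the disable is conditionally undone if clearing fails. A condensed, hedged sketch of the resulting control flow, assuming the ACPICA helpers named in the diff (acpi_hw_low_set_gpe, acpi_hw_clear_gpe) behave as their names suggest:

```c
/* Sketch of the reordered path; error reporting abbreviated. */
static u32 example_dispatch(struct acpi_gpe_event_info *info)
{
	/* 1. Disable first, so the GPE cannot keep firing while serviced */
	if (ACPI_FAILURE(acpi_hw_low_set_gpe(info, ACPI_GPE_DISABLE)))
		return (ACPI_INTERRUPT_NOT_HANDLED);

	/* 2. Only then clear an edge-triggered status bit; undo the disable on failure */
	if ((info->flags & ACPI_GPE_XRUPT_TYPE_MASK) == ACPI_GPE_EDGE_TRIGGERED) {
		if (ACPI_FAILURE(acpi_hw_clear_gpe(info))) {
			(void)acpi_hw_low_set_gpe(info, ACPI_GPE_CONDITIONAL_ENABLE);
			return (ACPI_INTERRUPT_NOT_HANDLED);
		}
	}

	/* 3. Hand off to the installed handler or the _Lxx/_Exx method */
	return (ACPI_INTERRUPT_HANDLED);
}
```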
index cb534faf536986da69a55fa3dc255e6de6a560cf..0cf159cc6e6d79085fa03ef15dcc91d9e53ec409 100644 (file)
@@ -126,11 +126,19 @@ acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number)
 
        flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
 
-       /* Ensure that we have a valid GPE number */
-
+       /*
+        * Ensure that we have a valid GPE number and that there is some way
+        * of handling the GPE (handler or a GPE method). In other words, we
+        * won't allow a valid GPE to be enabled if there is no way to handle it.
+        */
        gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
        if (gpe_event_info) {
-               status = acpi_ev_add_gpe_reference(gpe_event_info);
+               if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) !=
+                   ACPI_GPE_DISPATCH_NONE) {
+                       status = acpi_ev_add_gpe_reference(gpe_event_info);
+               } else {
+                       status = AE_NO_HANDLER;
+               }
        }
 
        acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
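
Given the stricter check above, a GPE can only be enabled once there is some way to service it. A hedged usage sketch from a hypothetical caller: install a handler first, then enable; without a handler or a _Lxx/_Exx method, acpi_enable_gpe() now fails with AE_NO_HANDLER.

```c
static u32 example_gpe_handler(acpi_handle gpe_device, u32 gpe_number, void *context)
{
	/* ... service the event ... */
	return (ACPI_INTERRUPT_HANDLED);
}

static acpi_status example_setup(acpi_handle gpe_device, u32 gpe_number)
{
	acpi_status status;

	/* Without this step (or a GPE method), acpi_enable_gpe() returns AE_NO_HANDLER */
	status = acpi_install_gpe_handler(gpe_device, gpe_number,
					  ACPI_GPE_LEVEL_TRIGGERED,
					  example_gpe_handler, NULL);
	if (ACPI_FAILURE(status))
		return (status);

	return (acpi_enable_gpe(gpe_device, gpe_number));
}
```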
@@ -177,6 +185,53 @@ acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number)
 ACPI_EXPORT_SYMBOL(acpi_disable_gpe)
 
 
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_mark_gpe_for_wake
+ *
+ * PARAMETERS:  gpe_device          - Parent GPE Device. NULL for GPE0/GPE1
+ *              gpe_number          - GPE level within the GPE block
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Mark a GPE as having the ability to wake the system. Simply
+ *              sets the ACPI_GPE_CAN_WAKE flag.
+ *
+ * Some potential callers of acpi_setup_gpe_for_wake may know in advance that
+ * there won't be any notify handlers installed for device wake notifications
+ * from the given GPE (one example is a button GPE in Linux). For these cases,
+ * acpi_mark_gpe_for_wake should be used instead of acpi_setup_gpe_for_wake.
+ * This will set the ACPI_GPE_CAN_WAKE flag for the GPE without trying to
+ * setup implicit wake notification for it (since there's no handler method).
+ *
+ ******************************************************************************/
+acpi_status acpi_mark_gpe_for_wake(acpi_handle gpe_device, u32 gpe_number)
+{
+       struct acpi_gpe_event_info *gpe_event_info;
+       acpi_status status = AE_BAD_PARAMETER;
+       acpi_cpu_flags flags;
+
+       ACPI_FUNCTION_TRACE(acpi_mark_gpe_for_wake);
+
+       flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+       /* Ensure that we have a valid GPE number */
+
+       gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
+       if (gpe_event_info) {
+
+               /* Mark the GPE as a possible wake event */
+
+               gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
+               status = AE_OK;
+       }
+
+       acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+       return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_mark_gpe_for_wake)
+
 /*******************************************************************************
  *
  * FUNCTION:    acpi_setup_gpe_for_wake
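
A hedged usage sketch for the new interface: when the OS already knows no notify handler will be installed for a wake GPE (the button case mentioned in the description above), it can mark the GPE directly instead of calling acpi_setup_gpe_for_wake(). The caller shown here is hypothetical; the GPE coordinates would normally come from the device's _PRW data.

```c
static void example_mark_button_wake_gpe(acpi_handle gpe_device, u32 gpe_number)
{
	acpi_status status;

	status = acpi_mark_gpe_for_wake(gpe_device, gpe_number);
	if (ACPI_FAILURE(status))
		return;

	/* The GPE can later be enabled for wakeup at suspend time, e.g. via
	 * acpi_set_gpe_wake_mask(gpe_device, gpe_number, ACPI_GPE_ENABLE). */
}
```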
index 12878e1982f77d5f69fcad7d266f7ac922f2a12d..1ff42c07b42b116b54bb0bb2166967f7578ea147 100644 (file)
@@ -56,7 +56,7 @@ acpi_ex_get_serial_access_length(u32 accessor_type, u32 access_length);
 
 /*******************************************************************************
  *
- * FUNCTION:    acpi_get_serial_access_bytes
+ * FUNCTION:    acpi_ex_get_serial_access_length
  *
  * PARAMETERS:  accessor_type   - The type of the protocol indicated by region
  *                                field access attributes
@@ -103,7 +103,7 @@ acpi_ex_get_serial_access_length(u32 accessor_type, u32 access_length)
        case AML_FIELD_ATTRIB_BLOCK_CALL:
        default:
 
-               length = ACPI_GSBUS_BUFFER_SIZE;
+               length = ACPI_GSBUS_BUFFER_SIZE - 2;
                break;
        }
 
index e0fd9b4978cd6a22af964a3165d26cfd62ea032f..a4c34d2c556b24d14f4954d3afe516a39f9b10d9 100644 (file)
@@ -278,8 +278,9 @@ acpi_status acpi_hw_clear_acpi_status(void)
 
        acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
 
-       if (ACPI_FAILURE(status))
+       if (ACPI_FAILURE(status)) {
                goto exit;
+       }
 
        /* Clear the GPE Bits in all GPE registers in all GPE blocks */
 
index fe54a8c73b8c8f1618c12badbe62d07ec73c7ea7..a42ee9d6970d727733cc260912352f56ba89cd22 100644 (file)
@@ -237,6 +237,16 @@ void acpi_ns_detach_object(struct acpi_namespace_node *node)
                    (node->object->common.type != ACPI_TYPE_LOCAL_DATA)) {
                        node->object = node->object->common.next_object;
                }
+
+               /*
+                * Detach the object from any data objects (which are still held by
+                * the namespace node)
+                */
+               if (obj_desc->common.next_object &&
+                   ((obj_desc->common.next_object)->common.type ==
+                    ACPI_TYPE_LOCAL_DATA)) {
+                       obj_desc->common.next_object = NULL;
+               }
        }
 
        /* Reset the node type to untyped */
index 3c16997406535dc2e85445af0fd1b60cd0db2610..038ea887f56292c430bce8222697fde5cac243ee 100644 (file)
@@ -199,3 +199,131 @@ acpi_ut_debug_dump_buffer(u8 *buffer, u32 count, u32 display, u32 component_id)
 
        acpi_ut_dump_buffer(buffer, count, display, 0);
 }
+
+#ifdef ACPI_APPLICATION
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_dump_buffer_to_file
+ *
+ * PARAMETERS:  file                - File descriptor
+ *              buffer              - Buffer to dump
+ *              count               - Amount to dump, in bytes
+ *              display             - BYTE, WORD, DWORD, or QWORD display:
+ *                                      DB_BYTE_DISPLAY
+ *                                      DB_WORD_DISPLAY
+ *                                      DB_DWORD_DISPLAY
+ *                                      DB_QWORD_DISPLAY
+ *              base_offset         - Beginning buffer offset (display only)
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Generic dump buffer in both hex and ascii to a file.
+ *
+ ******************************************************************************/
+
+void
+acpi_ut_dump_buffer_to_file(ACPI_FILE file,
+                           u8 *buffer, u32 count, u32 display, u32 base_offset)
+{
+       u32 i = 0;
+       u32 j;
+       u32 temp32;
+       u8 buf_char;
+
+       if (!buffer) {
+               acpi_ut_file_printf(file,
+                                   "Null Buffer Pointer in DumpBuffer!\n");
+               return;
+       }
+
+       if ((count < 4) || (count & 0x01)) {
+               display = DB_BYTE_DISPLAY;
+       }
+
+       /* Nasty little dump buffer routine! */
+
+       while (i < count) {
+
+               /* Print current offset */
+
+               acpi_ut_file_printf(file, "%6.4X: ", (base_offset + i));
+
+               /* Print 16 hex chars */
+
+               for (j = 0; j < 16;) {
+                       if (i + j >= count) {
+
+                               /* Dump fill spaces */
+
+                               acpi_ut_file_printf(file, "%*s",
+                                                   ((display * 2) + 1), " ");
+                               j += display;
+                               continue;
+                       }
+
+                       switch (display) {
+                       case DB_BYTE_DISPLAY:
+                       default:        /* Default is BYTE display */
+
+                               acpi_ut_file_printf(file, "%02X ",
+                                                   buffer[(acpi_size) i + j]);
+                               break;
+
+                       case DB_WORD_DISPLAY:
+
+                               ACPI_MOVE_16_TO_32(&temp32,
+                                                  &buffer[(acpi_size) i + j]);
+                               acpi_ut_file_printf(file, "%04X ", temp32);
+                               break;
+
+                       case DB_DWORD_DISPLAY:
+
+                               ACPI_MOVE_32_TO_32(&temp32,
+                                                  &buffer[(acpi_size) i + j]);
+                               acpi_ut_file_printf(file, "%08X ", temp32);
+                               break;
+
+                       case DB_QWORD_DISPLAY:
+
+                               ACPI_MOVE_32_TO_32(&temp32,
+                                                  &buffer[(acpi_size) i + j]);
+                               acpi_ut_file_printf(file, "%08X", temp32);
+
+                               ACPI_MOVE_32_TO_32(&temp32,
+                                                  &buffer[(acpi_size) i + j +
+                                                          4]);
+                               acpi_ut_file_printf(file, "%08X ", temp32);
+                               break;
+                       }
+
+                       j += display;
+               }
+
+               /*
+                * Print the ASCII equivalent characters but watch out for the bad
+                * unprintable ones (printable chars are 0x20 through 0x7E)
+                */
+               acpi_ut_file_printf(file, " ");
+               for (j = 0; j < 16; j++) {
+                       if (i + j >= count) {
+                               acpi_ut_file_printf(file, "\n");
+                               return;
+                       }
+
+                       buf_char = buffer[(acpi_size) i + j];
+                       if (ACPI_IS_PRINT(buf_char)) {
+                               acpi_ut_file_printf(file, "%c", buf_char);
+                       } else {
+                               acpi_ut_file_printf(file, ".");
+                       }
+               }
+
+               /* Done with that line. */
+
+               acpi_ut_file_printf(file, "\n");
+               i += 16;
+       }
+
+       return;
+}
+#endif
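
A short hedged example of calling the new file-based dump helper; the file handle and buffer are placeholders supplied by the caller:

```c
/* Dump 64 bytes of a table as bytes, with offsets printed relative to 0. */
static void example_dump(ACPI_FILE file, u8 *table_bytes)
{
	acpi_ut_dump_buffer_to_file(file, table_bytes, 64, DB_BYTE_DISPLAY, 0);
}
```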
index 270c16464dd948181f98755f8e1f13809c9acf13..ff601c0f7c7a0024048f2e2d10478c6a7104ad13 100644 (file)
@@ -1001,5 +1001,11 @@ acpi_ut_copy_iobject_to_iobject(union acpi_operand_object *source_desc,
                status = acpi_ut_copy_simple_object(source_desc, *dest_desc);
        }
 
+       /* Delete the allocated object if copy failed */
+
+       if (ACPI_FAILURE(status)) {
+               acpi_ut_remove_reference(*dest_desc);
+       }
+
        return_ACPI_STATUS(status);
 }
index 21a20ac5b1e1aec4f8654b03caab01ba26466c8a..e516254c63b2260deeb1c3824292dd62f0472f1f 100644 (file)
@@ -561,3 +561,29 @@ acpi_ut_ptr_exit(u32 line_number,
 }
 
 #endif
+
+#ifdef ACPI_APPLICATION
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_log_error
+ *
+ * PARAMETERS:  format              - Printf format field
+ *              ...                 - Optional printf arguments
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Print error message to the console, used by applications.
+ *
+ ******************************************************************************/
+
+void ACPI_INTERNAL_VAR_XFACE acpi_log_error(const char *format, ...)
+{
+       va_list args;
+
+       va_start(args, format);
+       (void)acpi_ut_file_vprintf(ACPI_FILE_ERR, format, args);
+       va_end(args);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_log_error)
+#endif
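
acpi_log_error() gives application builds a printf-style error path that lands on ACPI_FILE_ERR. A hedged one-function usage example with an invented caller:

```c
static void example_report_open_failure(const char *filename)
{
	/* Formats via acpi_ut_file_vprintf() and writes to ACPI_FILE_ERR */
	acpi_log_error("Could not open input file %s\n", filename);
}
```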
diff --git a/drivers/acpi/acpica/utfileio.c b/drivers/acpi/acpica/utfileio.c
new file mode 100644 (file)
index 0000000..bdf9914
--- /dev/null
@@ -0,0 +1,332 @@
+/*******************************************************************************
+ *
+ * Module Name: utfileio - simple file I/O routines
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2014, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "actables.h"
+#include "acapps.h"
+
+#ifdef ACPI_ASL_COMPILER
+#include "aslcompiler.h"
+#endif
+
+#define _COMPONENT          ACPI_CA_DEBUGGER
+ACPI_MODULE_NAME("utfileio")
+
+#ifdef ACPI_APPLICATION
+/* Local prototypes */
+static acpi_status
+acpi_ut_check_text_mode_corruption(u8 *table,
+                                  u32 table_length, u32 file_length);
+
+static acpi_status
+acpi_ut_read_table(FILE * fp,
+                  struct acpi_table_header **table, u32 *table_length);
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_check_text_mode_corruption
+ *
+ * PARAMETERS:  table           - Table buffer
+ *              table_length    - Length of table from the table header
+ *              file_length     - Length of the file that contains the table
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Check table for text mode file corruption where all linefeed
+ *              characters (LF) have been replaced by carriage return linefeed
+ *              pairs (CR/LF).
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ut_check_text_mode_corruption(u8 *table, u32 table_length, u32 file_length)
+{
+       u32 i;
+       u32 pairs = 0;
+
+       if (table_length != file_length) {
+               ACPI_WARNING((AE_INFO,
+                             "File length (0x%X) is not the same as the table length (0x%X)",
+                             file_length, table_length));
+       }
+
+       /* Scan entire table to determine if each LF has been prefixed with a CR */
+
+       for (i = 1; i < file_length; i++) {
+               if (table[i] == 0x0A) {
+                       if (table[i - 1] != 0x0D) {
+
+                               /* The LF does not have a preceding CR, table not corrupted */
+
+                               return (AE_OK);
+                       } else {
+                               /* Found a CR/LF pair */
+
+                               pairs++;
+                       }
+                       i++;
+               }
+       }
+
+       if (!pairs) {
+               return (AE_OK);
+       }
+
+       /*
+        * Entire table scanned, each CR is part of a CR/LF pair --
+        * meaning that the table was treated as a text file somewhere.
+        *
+        * NOTE: We can't "fix" the table, because any existing CR/LF pairs in the
+        * original table are left untouched by the text conversion process --
+        * meaning that we cannot simply replace CR/LF pairs with LFs.
+        */
+       acpi_os_printf("Table has been corrupted by text mode conversion\n");
+       acpi_os_printf("All LFs (%u) were changed to CR/LF pairs\n", pairs);
+       acpi_os_printf("Table cannot be repaired!\n");
+       return (AE_BAD_VALUE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_read_table
+ *
+ * PARAMETERS:  fp              - File that contains table
+ *              table           - Return value, buffer with table
+ *              table_length    - Return value, length of table
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Load the DSDT from the file pointer
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ut_read_table(FILE * fp,
+                  struct acpi_table_header **table, u32 *table_length)
+{
+       struct acpi_table_header table_header;
+       u32 actual;
+       acpi_status status;
+       u32 file_size;
+       u8 standard_header = TRUE;
+       s32 count;
+
+       /* Get the file size */
+
+       file_size = cm_get_file_size(fp);
+       if (file_size == ACPI_UINT32_MAX) {
+               return (AE_ERROR);
+       }
+
+       if (file_size < 4) {
+               return (AE_BAD_HEADER);
+       }
+
+       /* Read the signature */
+
+       fseek(fp, 0, SEEK_SET);
+
+       count = fread(&table_header, 1, sizeof(struct acpi_table_header), fp);
+       if (count != sizeof(struct acpi_table_header)) {
+               acpi_os_printf("Could not read the table header\n");
+               return (AE_BAD_HEADER);
+       }
+
+       /* The RSDP table does not have standard ACPI header */
+
+       if (ACPI_VALIDATE_RSDP_SIG(table_header.signature)) {
+               *table_length = file_size;
+               standard_header = FALSE;
+       } else {
+
+#if 0
+               /* Validate the table header/length */
+
+               status = acpi_tb_validate_table_header(&table_header);
+               if (ACPI_FAILURE(status)) {
+                       acpi_os_printf("Table header is invalid!\n");
+                       return (status);
+               }
+#endif
+
+               /* File size must be at least as long as the Header-specified length */
+
+               if (table_header.length > file_size) {
+                       acpi_os_printf
+                           ("TableHeader length [0x%X] greater than the input file size [0x%X]\n",
+                            table_header.length, file_size);
+
+#ifdef ACPI_ASL_COMPILER
+                       status = fl_check_for_ascii(fp, NULL, FALSE);
+                       if (ACPI_SUCCESS(status)) {
+                               acpi_os_printf
+                                   ("File appears to be ASCII only, must be binary\n",
+                                    table_header.length, file_size);
+                       }
+#endif
+                       return (AE_BAD_HEADER);
+               }
+#ifdef ACPI_OBSOLETE_CODE
+               /* We only support a limited number of table types */
+
+               if (!ACPI_COMPARE_NAME
+                   ((char *)table_header.signature, ACPI_SIG_DSDT)
+                   && !ACPI_COMPARE_NAME((char *)table_header.signature,
+                                         ACPI_SIG_PSDT)
+                   && !ACPI_COMPARE_NAME((char *)table_header.signature,
+                                         ACPI_SIG_SSDT)) {
+                       acpi_os_printf
+                           ("Table signature [%4.4s] is invalid or not supported\n",
+                            (char *)table_header.signature);
+                       ACPI_DUMP_BUFFER(&table_header,
+                                        sizeof(struct acpi_table_header));
+                       return (AE_ERROR);
+               }
+#endif
+
+               *table_length = table_header.length;
+       }
+
+       /* Allocate a buffer for the table */
+
+       *table = acpi_os_allocate((size_t) file_size);
+       if (!*table) {
+               acpi_os_printf
+                   ("Could not allocate memory for ACPI table %4.4s (size=0x%X)\n",
+                    table_header.signature, *table_length);
+               return (AE_NO_MEMORY);
+       }
+
+       /* Get the rest of the table */
+
+       fseek(fp, 0, SEEK_SET);
+       actual = fread(*table, 1, (size_t) file_size, fp);
+       if (actual == file_size) {
+               if (standard_header) {
+
+                       /* Now validate the checksum */
+
+                       status = acpi_tb_verify_checksum((void *)*table,
+                                                        ACPI_CAST_PTR(struct
+                                                                      acpi_table_header,
+                                                                      *table)->
+                                                        length);
+
+                       if (status == AE_BAD_CHECKSUM) {
+                               status =
+                                   acpi_ut_check_text_mode_corruption((u8 *)
+                                                                      *table,
+                                                                      file_size,
+                                                                      (*table)->
+                                                                      length);
+                               return (status);
+                       }
+               }
+               return (AE_OK);
+       }
+
+       if (actual > 0) {
+               acpi_os_printf("Warning - reading table, asked for %X got %X\n",
+                              file_size, actual);
+               return (AE_OK);
+       }
+
+       acpi_os_printf("Error - could not read the table file\n");
+       acpi_os_free(*table);
+       *table = NULL;
+       *table_length = 0;
+       return (AE_ERROR);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_read_table_from_file
+ *
+ * PARAMETERS:  filename         - File where table is located
+ *              table            - Where a pointer to the table is returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Get an ACPI table from a file
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ut_read_table_from_file(char *filename, struct acpi_table_header ** table)
+{
+       FILE *file;
+       u32 file_size;
+       u32 table_length;
+       acpi_status status = AE_ERROR;
+
+       /* Open the file, get current size */
+
+       file = fopen(filename, "rb");
+       if (!file) {
+               perror("Could not open input file");
+               return (status);
+       }
+
+       file_size = cm_get_file_size(file);
+       if (file_size == ACPI_UINT32_MAX) {
+               goto exit;
+       }
+
+       /* Get the entire file */
+
+       fprintf(stderr,
+               "Loading Acpi table from file %10s - Length %.8u (%06X)\n",
+               filename, file_size, file_size);
+
+       status = acpi_ut_read_table(file, table, &table_length);
+       if (ACPI_FAILURE(status)) {
+               acpi_os_printf("Could not get table from the file\n");
+       }
+
+exit:
+       fclose(file);
+       return (status);
+}
+
+#endif
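
A hedged sketch of how an acpidump/acpiexec-style application might use the new file reader; the filename is illustrative only:

```c
static acpi_status example_load_table(void)
{
	struct acpi_table_header *table;
	acpi_status status;

	status = acpi_ut_read_table_from_file("dsdt.dat", &table);
	if (ACPI_FAILURE(status))
		return (status);

	/* ... install, dump, or disassemble the table ... */

	acpi_os_free(table);   /* buffer was allocated with acpi_os_allocate() */
	return (AE_OK);
}
```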
index d69be3cb3faebca6e981617b506a4dfdfcd18d7b..77ceac715f28bd40f1f3ad8f4ff8152f53fbf3f0 100644 (file)
@@ -214,152 +214,6 @@ struct acpi_fixed_event_info acpi_gbl_fixed_event_info[ACPI_NUM_FIXED_EVENTS] =
 };
 #endif                         /* !ACPI_REDUCED_HARDWARE */
 
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_init_globals
- *
- * PARAMETERS:  None
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Initialize ACPICA globals. All globals that require specific
- *              initialization should be initialized here. This allows for
- *              a warm restart.
- *
- ******************************************************************************/
-
-acpi_status acpi_ut_init_globals(void)
-{
-       acpi_status status;
-       u32 i;
-
-       ACPI_FUNCTION_TRACE(ut_init_globals);
-
-       /* Create all memory caches */
-
-       status = acpi_ut_create_caches();
-       if (ACPI_FAILURE(status)) {
-               return_ACPI_STATUS(status);
-       }
-
-       /* Address Range lists */
-
-       for (i = 0; i < ACPI_ADDRESS_RANGE_MAX; i++) {
-               acpi_gbl_address_range_list[i] = NULL;
-       }
-
-       /* Mutex locked flags */
-
-       for (i = 0; i < ACPI_NUM_MUTEX; i++) {
-               acpi_gbl_mutex_info[i].mutex = NULL;
-               acpi_gbl_mutex_info[i].thread_id = ACPI_MUTEX_NOT_ACQUIRED;
-               acpi_gbl_mutex_info[i].use_count = 0;
-       }
-
-       for (i = 0; i < ACPI_NUM_OWNERID_MASKS; i++) {
-               acpi_gbl_owner_id_mask[i] = 0;
-       }
-
-       /* Last owner_ID is never valid */
-
-       acpi_gbl_owner_id_mask[ACPI_NUM_OWNERID_MASKS - 1] = 0x80000000;
-
-       /* Event counters */
-
-       acpi_method_count = 0;
-       acpi_sci_count = 0;
-       acpi_gpe_count = 0;
-
-       for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) {
-               acpi_fixed_event_count[i] = 0;
-       }
-
-#if (!ACPI_REDUCED_HARDWARE)
-
-       /* GPE/SCI support */
-
-       acpi_gbl_all_gpes_initialized = FALSE;
-       acpi_gbl_gpe_xrupt_list_head = NULL;
-       acpi_gbl_gpe_fadt_blocks[0] = NULL;
-       acpi_gbl_gpe_fadt_blocks[1] = NULL;
-       acpi_current_gpe_count = 0;
-
-       acpi_gbl_global_event_handler = NULL;
-       acpi_gbl_sci_handler_list = NULL;
-
-#endif                         /* !ACPI_REDUCED_HARDWARE */
-
-       /* Global handlers */
-
-       acpi_gbl_global_notify[0].handler = NULL;
-       acpi_gbl_global_notify[1].handler = NULL;
-       acpi_gbl_exception_handler = NULL;
-       acpi_gbl_init_handler = NULL;
-       acpi_gbl_table_handler = NULL;
-       acpi_gbl_interface_handler = NULL;
-
-       /* Global Lock support */
-
-       acpi_gbl_global_lock_semaphore = NULL;
-       acpi_gbl_global_lock_mutex = NULL;
-       acpi_gbl_global_lock_acquired = FALSE;
-       acpi_gbl_global_lock_handle = 0;
-       acpi_gbl_global_lock_present = FALSE;
-
-       /* Miscellaneous variables */
-
-       acpi_gbl_DSDT = NULL;
-       acpi_gbl_cm_single_step = FALSE;
-       acpi_gbl_shutdown = FALSE;
-       acpi_gbl_ns_lookup_count = 0;
-       acpi_gbl_ps_find_count = 0;
-       acpi_gbl_acpi_hardware_present = TRUE;
-       acpi_gbl_last_owner_id_index = 0;
-       acpi_gbl_next_owner_id_offset = 0;
-       acpi_gbl_trace_dbg_level = 0;
-       acpi_gbl_trace_dbg_layer = 0;
-       acpi_gbl_debugger_configuration = DEBUGGER_THREADING;
-       acpi_gbl_db_output_flags = ACPI_DB_CONSOLE_OUTPUT;
-       acpi_gbl_osi_mutex = NULL;
-       acpi_gbl_reg_methods_executed = FALSE;
-
-       /* Hardware oriented */
-
-       acpi_gbl_events_initialized = FALSE;
-       acpi_gbl_system_awake_and_running = TRUE;
-
-       /* Namespace */
-
-       acpi_gbl_module_code_list = NULL;
-       acpi_gbl_root_node = NULL;
-       acpi_gbl_root_node_struct.name.integer = ACPI_ROOT_NAME;
-       acpi_gbl_root_node_struct.descriptor_type = ACPI_DESC_TYPE_NAMED;
-       acpi_gbl_root_node_struct.type = ACPI_TYPE_DEVICE;
-       acpi_gbl_root_node_struct.parent = NULL;
-       acpi_gbl_root_node_struct.child = NULL;
-       acpi_gbl_root_node_struct.peer = NULL;
-       acpi_gbl_root_node_struct.object = NULL;
-
-#ifdef ACPI_DISASSEMBLER
-       acpi_gbl_external_list = NULL;
-       acpi_gbl_num_external_methods = 0;
-       acpi_gbl_resolved_external_methods = 0;
-#endif
-
-#ifdef ACPI_DEBUG_OUTPUT
-       acpi_gbl_lowest_stack_pointer = ACPI_CAST_PTR(acpi_size, ACPI_SIZE_MAX);
-#endif
-
-#ifdef ACPI_DBG_TRACK_ALLOCATIONS
-       acpi_gbl_display_final_mem_stats = FALSE;
-       acpi_gbl_disable_mem_tracking = FALSE;
-#endif
-
-       ACPI_DEBUGGER_EXEC(acpi_gbl_db_terminate_threads = FALSE);
-
-       return_ACPI_STATUS(AE_OK);
-}
-
 /* Public globals */
 
 ACPI_EXPORT_SYMBOL(acpi_gbl_FADT)
index 5f56fc49021ecf9f198baec7370e940441ecfc06..77120ec9ea860385cf5e3adddd530f83fc790a7f 100644 (file)
@@ -102,6 +102,151 @@ static void acpi_ut_free_gpe_lists(void)
 }
 #endif                         /* !ACPI_REDUCED_HARDWARE */
 
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_init_globals
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Initialize ACPICA globals. All globals that require specific
+ *              initialization should be initialized here. This allows for
+ *              a warm restart.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_init_globals(void)
+{
+       acpi_status status;
+       u32 i;
+
+       ACPI_FUNCTION_TRACE(ut_init_globals);
+
+       /* Create all memory caches */
+
+       status = acpi_ut_create_caches();
+       if (ACPI_FAILURE(status)) {
+               return_ACPI_STATUS(status);
+       }
+
+       /* Address Range lists */
+
+       for (i = 0; i < ACPI_ADDRESS_RANGE_MAX; i++) {
+               acpi_gbl_address_range_list[i] = NULL;
+       }
+
+       /* Mutex locked flags */
+
+       for (i = 0; i < ACPI_NUM_MUTEX; i++) {
+               acpi_gbl_mutex_info[i].mutex = NULL;
+               acpi_gbl_mutex_info[i].thread_id = ACPI_MUTEX_NOT_ACQUIRED;
+               acpi_gbl_mutex_info[i].use_count = 0;
+       }
+
+       for (i = 0; i < ACPI_NUM_OWNERID_MASKS; i++) {
+               acpi_gbl_owner_id_mask[i] = 0;
+       }
+
+       /* Last owner_ID is never valid */
+
+       acpi_gbl_owner_id_mask[ACPI_NUM_OWNERID_MASKS - 1] = 0x80000000;
+
+       /* Event counters */
+
+       acpi_method_count = 0;
+       acpi_sci_count = 0;
+       acpi_gpe_count = 0;
+
+       for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) {
+               acpi_fixed_event_count[i] = 0;
+       }
+
+#if (!ACPI_REDUCED_HARDWARE)
+
+       /* GPE/SCI support */
+
+       acpi_gbl_all_gpes_initialized = FALSE;
+       acpi_gbl_gpe_xrupt_list_head = NULL;
+       acpi_gbl_gpe_fadt_blocks[0] = NULL;
+       acpi_gbl_gpe_fadt_blocks[1] = NULL;
+       acpi_current_gpe_count = 0;
+
+       acpi_gbl_global_event_handler = NULL;
+       acpi_gbl_sci_handler_list = NULL;
+
+#endif                         /* !ACPI_REDUCED_HARDWARE */
+
+       /* Global handlers */
+
+       acpi_gbl_global_notify[0].handler = NULL;
+       acpi_gbl_global_notify[1].handler = NULL;
+       acpi_gbl_exception_handler = NULL;
+       acpi_gbl_init_handler = NULL;
+       acpi_gbl_table_handler = NULL;
+       acpi_gbl_interface_handler = NULL;
+
+       /* Global Lock support */
+
+       acpi_gbl_global_lock_semaphore = NULL;
+       acpi_gbl_global_lock_mutex = NULL;
+       acpi_gbl_global_lock_acquired = FALSE;
+       acpi_gbl_global_lock_handle = 0;
+       acpi_gbl_global_lock_present = FALSE;
+
+       /* Miscellaneous variables */
+
+       acpi_gbl_DSDT = NULL;
+       acpi_gbl_cm_single_step = FALSE;
+       acpi_gbl_shutdown = FALSE;
+       acpi_gbl_ns_lookup_count = 0;
+       acpi_gbl_ps_find_count = 0;
+       acpi_gbl_acpi_hardware_present = TRUE;
+       acpi_gbl_last_owner_id_index = 0;
+       acpi_gbl_next_owner_id_offset = 0;
+       acpi_gbl_trace_dbg_level = 0;
+       acpi_gbl_trace_dbg_layer = 0;
+       acpi_gbl_debugger_configuration = DEBUGGER_THREADING;
+       acpi_gbl_osi_mutex = NULL;
+       acpi_gbl_reg_methods_executed = FALSE;
+
+       /* Hardware oriented */
+
+       acpi_gbl_events_initialized = FALSE;
+       acpi_gbl_system_awake_and_running = TRUE;
+
+       /* Namespace */
+
+       acpi_gbl_module_code_list = NULL;
+       acpi_gbl_root_node = NULL;
+       acpi_gbl_root_node_struct.name.integer = ACPI_ROOT_NAME;
+       acpi_gbl_root_node_struct.descriptor_type = ACPI_DESC_TYPE_NAMED;
+       acpi_gbl_root_node_struct.type = ACPI_TYPE_DEVICE;
+       acpi_gbl_root_node_struct.parent = NULL;
+       acpi_gbl_root_node_struct.child = NULL;
+       acpi_gbl_root_node_struct.peer = NULL;
+       acpi_gbl_root_node_struct.object = NULL;
+
+#ifdef ACPI_DISASSEMBLER
+       acpi_gbl_external_list = NULL;
+       acpi_gbl_num_external_methods = 0;
+       acpi_gbl_resolved_external_methods = 0;
+#endif
+
+#ifdef ACPI_DEBUG_OUTPUT
+       acpi_gbl_lowest_stack_pointer = ACPI_CAST_PTR(acpi_size, ACPI_SIZE_MAX);
+#endif
+
+#ifdef ACPI_DBG_TRACK_ALLOCATIONS
+       acpi_gbl_display_final_mem_stats = FALSE;
+       acpi_gbl_disable_mem_tracking = FALSE;
+#endif
+
+       ACPI_DEBUGGER_EXEC(acpi_gbl_db_terminate_threads = FALSE);
+
+       return_ACPI_STATUS(AE_OK);
+}
+
 /******************************************************************************
  *
  * FUNCTION:    acpi_ut_terminate
diff --git a/drivers/acpi/acpica/utprint.c b/drivers/acpi/acpica/utprint.c
new file mode 100644 (file)
index 0000000..1031164
--- /dev/null
@@ -0,0 +1,661 @@
+/******************************************************************************
+ *
+ * Module Name: utprint - Formatted printing routines
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2014, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+
+#define _COMPONENT          ACPI_UTILITIES
+ACPI_MODULE_NAME("utprint")
+
+#define ACPI_FORMAT_SIGN            0x01
+#define ACPI_FORMAT_SIGN_PLUS       0x02
+#define ACPI_FORMAT_SIGN_PLUS_SPACE 0x04
+#define ACPI_FORMAT_ZERO            0x08
+#define ACPI_FORMAT_LEFT            0x10
+#define ACPI_FORMAT_UPPER           0x20
+#define ACPI_FORMAT_PREFIX          0x40
+
+/* Local prototypes */
+static acpi_size
+acpi_ut_bound_string_length(const char *string, acpi_size count);
+
+static char *acpi_ut_bound_string_output(char *string, const char *end, char c);
+
+static char *acpi_ut_format_number(char *string,
+                                  char *end,
+                                  u64 number,
+                                  u8 base, s32 width, s32 precision, u8 type);
+
+static char *acpi_ut_put_number(char *string, u64 number, u8 base, u8 upper);
+
+/* Module globals */
+
+static const char acpi_gbl_lower_hex_digits[] = "0123456789abcdef";
+static const char acpi_gbl_upper_hex_digits[] = "0123456789ABCDEF";
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_bound_string_length
+ *
+ * PARAMETERS:  string              - String with boundary
+ *              count               - Boundary of the string
+ *
+ * RETURN:      Length of the string. Less than or equal to Count.
+ *
+ * DESCRIPTION: Calculate the length of a string with boundary.
+ *
+ ******************************************************************************/
+
+static acpi_size
+acpi_ut_bound_string_length(const char *string, acpi_size count)
+{
+       u32 length = 0;
+
+       while (*string && count) {
+               length++;
+               string++;
+               count--;
+       }
+
+       return (length);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_bound_string_output
+ *
+ * PARAMETERS:  string              - String with boundary
+ *              end                 - Boundary of the string
+ *              c                   - Character to be output to the string
+ *
+ * RETURN:      Updated position for next valid character
+ *
+ * DESCRIPTION: Output a character into a string with boundary check.
+ *
+ ******************************************************************************/
+
+static char *acpi_ut_bound_string_output(char *string, const char *end, char c)
+{
+
+       if (string < end) {
+               *string = c;
+       }
+
+       ++string;
+       return (string);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_put_number
+ *
+ * PARAMETERS:  string              - Buffer to hold reverse-ordered string
+ *              number              - Integer to be converted
+ *              base                - Base of the integer
+ *              upper               - Whether or not using upper cased digits
+ *
+ * RETURN:      Updated position for next valid character
+ *
+ * DESCRIPTION: Convert an integer into a string, note that, the string holds a
+ *              reversed ordered number without the trailing zero.
+ *
+ ******************************************************************************/
+
+static char *acpi_ut_put_number(char *string, u64 number, u8 base, u8 upper)
+{
+       const char *digits;
+       u64 digit_index;
+       char *pos;
+
+       pos = string;
+       digits = upper ? acpi_gbl_upper_hex_digits : acpi_gbl_lower_hex_digits;
+
+       if (number == 0) {
+               *(pos++) = '0';
+       } else {
+               while (number) {
+                       (void)acpi_ut_divide(number, base, &number,
+                                            &digit_index);
+                       *(pos++) = digits[digit_index];
+               }
+       }
+
+       /* *(Pos++) = '0'; */
+       return (pos);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_scan_number
+ *
+ * PARAMETERS:  string              - String buffer
+ *              number_ptr          - Where the number is returned
+ *
+ * RETURN:      Updated position for next valid character
+ *
+ * DESCRIPTION: Scan a string for a decimal integer.
+ *
+ ******************************************************************************/
+
+const char *acpi_ut_scan_number(const char *string, u64 *number_ptr)
+{
+       u64 number = 0;
+
+       while (ACPI_IS_DIGIT(*string)) {
+               number *= 10;
+               number += *(string++) - '0';
+       }
+
+       *number_ptr = number;
+       return (string);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_print_number
+ *
+ * PARAMETERS:  string              - String buffer
+ *              number              - The number to be converted
+ *
+ * RETURN:      Updated position for next valid character
+ *
+ * DESCRIPTION: Print a decimal integer into a string.
+ *
+ ******************************************************************************/
+
+const char *acpi_ut_print_number(char *string, u64 number)
+{
+       char ascii_string[20];
+       const char *pos1;
+       char *pos2;
+
+       pos1 = acpi_ut_put_number(ascii_string, number, 10, FALSE);
+       pos2 = string;
+
+       while (pos1 != ascii_string) {
+               *(pos2++) = *(--pos1);
+       }
+
+       *pos2 = 0;
+       return (string);
+}
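
Taken together, the two helpers above convert in both directions; a tiny hedged round-trip example (values chosen only for illustration):

```c
/* Parse "42..." into a u64, then print 42 back into a buffer. */
static void example_number_round_trip(void)
{
	u64 value;
	char buffer[24];
	const char *rest;

	rest = acpi_ut_scan_number("42 trailing text", &value); /* value == 42 */
	(void)rest;                                              /* points past "42" */
	acpi_ut_print_number(buffer, value);                     /* buffer == "42" */
}
```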
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_format_number
+ *
+ * PARAMETERS:  string              - String buffer with boundary
+ *              end                 - Boundary of the string
+ *              number              - The number to be converted
+ *              base                - Base of the integer
+ *              width               - Field width
+ *              precision           - Precision of the integer
+ *              type                - Special printing flags
+ *
+ * RETURN:      Updated position for next valid character
+ *
+ * DESCRIPTION: Print an integer into a string with any base and any precision.
+ *
+ ******************************************************************************/
+
+static char *acpi_ut_format_number(char *string,
+                                  char *end,
+                                  u64 number,
+                                  u8 base, s32 width, s32 precision, u8 type)
+{
+       char sign;
+       char zero;
+       u8 need_prefix;
+       u8 upper;
+       s32 i;
+       char reversed_string[66];
+
+       /* Parameter validation */
+
+       if (base < 2 || base > 16) {
+               return (NULL);
+       }
+
+       if (type & ACPI_FORMAT_LEFT) {
+               type &= ~ACPI_FORMAT_ZERO;
+       }
+
+       need_prefix = ((type & ACPI_FORMAT_PREFIX)
+                      && base != 10) ? TRUE : FALSE;
+       upper = (type & ACPI_FORMAT_UPPER) ? TRUE : FALSE;
+       zero = (type & ACPI_FORMAT_ZERO) ? '0' : ' ';
+
+       /* Calculate size according to sign and prefix */
+
+       sign = '\0';
+       if (type & ACPI_FORMAT_SIGN) {
+               if ((s64) number < 0) {
+                       sign = '-';
+                       number = -(s64) number;
+                       width--;
+               } else if (type & ACPI_FORMAT_SIGN_PLUS) {
+                       sign = '+';
+                       width--;
+               } else if (type & ACPI_FORMAT_SIGN_PLUS_SPACE) {
+                       sign = ' ';
+                       width--;
+               }
+       }
+       if (need_prefix) {
+               width--;
+               if (base == 16) {
+                       width--;
+               }
+       }
+
+       /* Generate full string in reverse order */
+
+       i = ACPI_PTR_DIFF(acpi_ut_put_number
+                         (reversed_string, number, base, upper),
+                         reversed_string);
+
+       /* Printing 100 using %2d gives "100", not "00" */
+
+       if (i > precision) {
+               precision = i;
+       }
+
+       width -= precision;
+
+       /* Output the string */
+
+       if (!(type & (ACPI_FORMAT_ZERO | ACPI_FORMAT_LEFT))) {
+               while (--width >= 0) {
+                       string = acpi_ut_bound_string_output(string, end, ' ');
+               }
+       }
+       if (sign) {
+               string = acpi_ut_bound_string_output(string, end, sign);
+       }
+       if (need_prefix) {
+               string = acpi_ut_bound_string_output(string, end, '0');
+               if (base == 16) {
+                       string = acpi_ut_bound_string_output(string, end,
+                                                            upper ? 'X' : 'x');
+               }
+       }
+       if (!(type & ACPI_FORMAT_LEFT)) {
+               while (--width >= 0) {
+                       string = acpi_ut_bound_string_output(string, end, zero);
+               }
+       }
+
+       while (i <= --precision) {
+               string = acpi_ut_bound_string_output(string, end, '0');
+       }
+       while (--i >= 0) {
+               string = acpi_ut_bound_string_output(string, end,
+                                                    reversed_string[i]);
+       }
+       while (--width >= 0) {
+               string = acpi_ut_bound_string_output(string, end, ' ');
+       }
+
+       return (string);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_vsnprintf
+ *
+ * PARAMETERS:  string              - String with boundary
+ *              size                - Boundary of the string
+ *              format              - Standard printf format
+ *              args                - Argument list
+ *
+ * RETURN:      Number of bytes actually written.
+ *
+ * DESCRIPTION: Formatted output to a string using argument list pointer.
+ *
+ ******************************************************************************/
+
+int
+acpi_ut_vsnprintf(char *string,
+                 acpi_size size, const char *format, va_list args)
+{
+       u8 base = 10;
+       u8 type = 0;
+       s32 width = -1;
+       s32 precision = -1;
+       char qualifier = 0;
+       u64 number;
+       char *pos;
+       char *end;
+       char c;
+       const char *s;
+       const void *p;
+       s32 length;
+       int i;
+
+       pos = string;
+       end = string + size;
+
+       for (; *format; ++format) {
+               if (*format != '%') {
+                       pos = acpi_ut_bound_string_output(pos, end, *format);
+                       continue;
+               }
+
+               /* Process sign */
+
+               do {
+                       ++format;
+                       if (*format == '#') {
+                               type |= ACPI_FORMAT_PREFIX;
+                       } else if (*format == '0') {
+                               type |= ACPI_FORMAT_ZERO;
+                       } else if (*format == '+') {
+                               type |= ACPI_FORMAT_SIGN_PLUS;
+                       } else if (*format == ' ') {
+                               type |= ACPI_FORMAT_SIGN_PLUS_SPACE;
+                       } else if (*format == '-') {
+                               type |= ACPI_FORMAT_LEFT;
+                       } else {
+                               break;
+                       }
+               } while (1);
+
+               /* Process width */
+
+               if (ACPI_IS_DIGIT(*format)) {
+                       format = acpi_ut_scan_number(format, &number);
+                       width = (s32) number;
+               } else if (*format == '*') {
+                       ++format;
+                       width = va_arg(args, int);
+                       if (width < 0) {
+                               width = -width;
+                               type |= ACPI_FORMAT_LEFT;
+                       }
+               }
+
+               /* Process precision */
+
+               if (*format == '.') {
+                       ++format;
+                       if (ACPI_IS_DIGIT(*format)) {
+                               format = acpi_ut_scan_number(format, &number);
+                               precision = (s32) number;
+                       } else if (*format == '*') {
+                               ++format;
+                               precision = va_arg(args, int);
+                       }
+                       if (precision < 0) {
+                               precision = 0;
+                       }
+               }
+
+               /* Process qualifier */
+
+               if (*format == 'h' || *format == 'l' || *format == 'L') {
+                       qualifier = *format;
+                       ++format;
+
+                       if (qualifier == 'l' && *format == 'l') {
+                               qualifier = 'L';
+                               ++format;
+                       }
+               }
+
+               switch (*format) {
+               case '%':
+
+                       pos = acpi_ut_bound_string_output(pos, end, '%');
+                       continue;
+
+               case 'c':
+
+                       if (!(type & ACPI_FORMAT_LEFT)) {
+                               while (--width > 0) {
+                                       pos =
+                                           acpi_ut_bound_string_output(pos,
+                                                                       end,
+                                                                       ' ');
+                               }
+                       }
+
+                       c = (char)va_arg(args, int);
+                       pos = acpi_ut_bound_string_output(pos, end, c);
+
+                       while (--width > 0) {
+                               pos =
+                                   acpi_ut_bound_string_output(pos, end, ' ');
+                       }
+                       continue;
+
+               case 's':
+
+                       s = va_arg(args, char *);
+                       if (!s) {
+                               s = "<NULL>";
+                       }
+                       length = acpi_ut_bound_string_length(s, precision);
+                       if (!(type & ACPI_FORMAT_LEFT)) {
+                               while (length < width--) {
+                                       pos =
+                                           acpi_ut_bound_string_output(pos,
+                                                                       end,
+                                                                       ' ');
+                               }
+                       }
+                       for (i = 0; i < length; ++i) {
+                               pos = acpi_ut_bound_string_output(pos, end, *s);
+                               ++s;
+                       }
+                       while (length < width--) {
+                               pos =
+                                   acpi_ut_bound_string_output(pos, end, ' ');
+                       }
+                       continue;
+
+               case 'o':
+
+                       base = 8;
+                       break;
+
+               case 'X':
+
+                       type |= ACPI_FORMAT_UPPER;
+
+               case 'x':
+
+                       base = 16;
+                       break;
+
+               case 'd':
+               case 'i':
+
+                       type |= ACPI_FORMAT_SIGN;
+
+               case 'u':
+
+                       break;
+
+               case 'p':
+
+                       if (width == -1) {
+                               width = 2 * sizeof(void *);
+                               type |= ACPI_FORMAT_ZERO;
+                       }
+
+                       p = va_arg(args, void *);
+                       pos = acpi_ut_format_number(pos, end,
+                                                   ACPI_TO_INTEGER(p), 16,
+                                                   width, precision, type);
+                       continue;
+
+               default:
+
+                       pos = acpi_ut_bound_string_output(pos, end, '%');
+                       if (*format) {
+                               pos =
+                                   acpi_ut_bound_string_output(pos, end,
+                                                               *format);
+                       } else {
+                               --format;
+                       }
+                       continue;
+               }
+
+               if (qualifier == 'L') {
+                       number = va_arg(args, u64);
+                       if (type & ACPI_FORMAT_SIGN) {
+                               number = (s64) number;
+                       }
+               } else if (qualifier == 'l') {
+                       number = va_arg(args, unsigned long);
+                       if (type & ACPI_FORMAT_SIGN) {
+                               number = (s32) number;
+                       }
+               } else if (qualifier == 'h') {
+                       number = (u16)va_arg(args, int);
+                       if (type & ACPI_FORMAT_SIGN) {
+                               number = (s16) number;
+                       }
+               } else {
+                       number = va_arg(args, unsigned int);
+                       if (type & ACPI_FORMAT_SIGN) {
+                               number = (signed int)number;
+                       }
+               }
+
+               pos = acpi_ut_format_number(pos, end, number, base,
+                                           width, precision, type);
+       }
+
+       if (size > 0) {
+               if (pos < end) {
+                       *pos = '\0';
+               } else {
+                       end[-1] = '\0';
+               }
+       }
+
+       return (ACPI_PTR_DIFF(pos, string));
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_snprintf
+ *
+ * PARAMETERS:  string              - String with boundary
+ *              size                - Boundary of the string
+ *              Format, ...         - Standard printf format
+ *
+ * RETURN:      Number of bytes actually written.
+ *
+ * DESCRIPTION: Formatted output to a string.
+ *
+ ******************************************************************************/
+
+int acpi_ut_snprintf(char *string, acpi_size size, const char *format, ...)
+{
+       va_list args;
+       int length;
+
+       va_start(args, format);
+       length = acpi_ut_vsnprintf(string, size, format, args);
+       va_end(args);
+
+       return (length);
+}
+
+#ifdef ACPI_APPLICATION
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_file_vprintf
+ *
+ * PARAMETERS:  file                - File descriptor
+ *              format              - Standard printf format
+ *              args                - Argument list
+ *
+ * RETURN:      Number of bytes actually written.
+ *
+ * DESCRIPTION: Formatted output to a file using argument list pointer.
+ *
+ ******************************************************************************/
+
+int acpi_ut_file_vprintf(ACPI_FILE file, const char *format, va_list args)
+{
+       acpi_cpu_flags flags;
+       int length;
+
+       flags = acpi_os_acquire_lock(acpi_gbl_print_lock);
+       length = acpi_ut_vsnprintf(acpi_gbl_print_buffer,
+                                  sizeof(acpi_gbl_print_buffer), format, args);
+
+       (void)acpi_os_write_file(file, acpi_gbl_print_buffer, length, 1);
+       acpi_os_release_lock(acpi_gbl_print_lock, flags);
+
+       return (length);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_file_printf
+ *
+ * PARAMETERS:  file                - File descriptor
+ *              Format, ...         - Standard printf format
+ *
+ * RETURN:      Number of bytes actually written.
+ *
+ * DESCRIPTION: Formatted output to a file.
+ *
+ ******************************************************************************/
+
+int acpi_ut_file_printf(ACPI_FILE file, const char *format, ...)
+{
+       va_list args;
+       int length;
+
+       va_start(args, format);
+       length = acpi_ut_file_vprintf(file, format, args);
+       va_end(args);
+
+       return (length);
+}
+#endif
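
A note on the new helpers above: every character of output goes through the same pos/end bounds check, the running pointer keeps advancing even after the buffer is full, and the buffer is NUL-terminated exactly once at the end, so the caller still learns the full length it would have needed. A minimal user-space sketch of that discipline (invented helper names, not the ACPICA code itself):

#include <stdio.h>

/* Write one character only if there is room, but always advance. */
static char *bound_output(char *pos, const char *end, char c)
{
        if (pos < end)
                *pos = c;
        return pos + 1;
}

/* Copy src into buf[size] with the same truncation rules as above. */
static size_t bounded_copy(char *buf, size_t size, const char *src)
{
        char *pos = buf;
        const char *end = buf + size;

        while (*src)
                pos = bound_output(pos, end, *src++);

        if (size > 0) {
                if (pos < end)
                        *pos = '\0';
                else
                        buf[size - 1] = '\0';   /* same effect as end[-1] */
        }
        return (size_t)(pos - buf);             /* length that was needed */
}

int main(void)
{
        char small[8];
        size_t needed = bounded_copy(small, sizeof(small), "hello, world");

        printf("needed %zu, got \"%s\"\n", needed, small);
        return 0;       /* prints: needed 12, got "hello, " */
}
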
index e5bcd919d4e6922d9fc7b87d42d949adb07826cf..16129c78b4891f0d9d9e2b9035748ad643cabb6a 100644 (file)
@@ -121,11 +121,11 @@ struct dentry;
 struct dentry *apei_get_debugfs_dir(void);
 
 #define apei_estatus_for_each_section(estatus, section)                        \
-       for (section = (struct acpi_generic_data *)(estatus + 1);       \
+       for (section = (struct acpi_hest_generic_data *)(estatus + 1);  \
             (void *)section - (void *)estatus < estatus->data_length;  \
             section = (void *)(section+1) + section->error_data_length)
 
-static inline u32 cper_estatus_len(struct acpi_generic_status *estatus)
+static inline u32 cper_estatus_len(struct acpi_hest_generic_status *estatus)
 {
        if (estatus->raw_data_length)
                return estatus->raw_data_offset + \
@@ -135,9 +135,9 @@ static inline u32 cper_estatus_len(struct acpi_generic_status *estatus)
 }
 
 void cper_estatus_print(const char *pfx,
-                       const struct acpi_generic_status *estatus);
-int cper_estatus_check_header(const struct acpi_generic_status *estatus);
-int cper_estatus_check(const struct acpi_generic_status *estatus);
+                       const struct acpi_hest_generic_status *estatus);
+int cper_estatus_check_header(const struct acpi_hest_generic_status *estatus);
+int cper_estatus_check(const struct acpi_hest_generic_status *estatus);
 
 int apei_osc_setup(void);
 #endif
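
For context on the rename above: apei_estatus_for_each_section() walks a packed sequence of variable-length generic data entries that directly follows the status block, using each entry's error_data_length to locate the next one. A stand-alone sketch of that pointer walk, using stand-in struct layouts rather than the real ACPI definitions:

#include <stdio.h>

struct fake_status {                    /* stands in for acpi_hest_generic_status */
        unsigned int data_length;       /* bytes of entry data following this header */
};

struct fake_section {                   /* stands in for acpi_hest_generic_data */
        unsigned int error_data_length; /* payload bytes after this entry header */
};

int main(void)
{
        unsigned int blob[32] = { 0 };  /* plenty of room, aligned for the casts */
        struct fake_status *estatus = (struct fake_status *)blob;
        struct fake_section *section;
        char *end;
        int i = 0;

        /* Build a status block followed by two entries with 16-byte payloads. */
        estatus->data_length = 2 * (sizeof(struct fake_section) + 16);
        section = (struct fake_section *)(estatus + 1);
        section->error_data_length = 16;
        section = (struct fake_section *)((char *)(section + 1) + 16);
        section->error_data_length = 16;

        /* Same shape of walk as apei_estatus_for_each_section(). */
        end = (char *)(estatus + 1) + estatus->data_length;
        for (section = (struct fake_section *)(estatus + 1);
             (char *)section < end;
             section = (struct fake_section *)((char *)(section + 1) +
                                               section->error_data_length))
                printf("entry %d: %u payload bytes\n", ++i,
                       section->error_data_length);

        return 0;
}
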
index dab7cb7349df7e62ee24f32493d8d5a8065ad3d9..7a38d1465b618e99a1bd9d20f2cbd7e0a0080f7f 100644 (file)
 #define GHES_ESTATUS_CACHE_LEN(estatus_len)                    \
        (sizeof(struct ghes_estatus_cache) + (estatus_len))
 #define GHES_ESTATUS_FROM_CACHE(estatus_cache)                 \
-       ((struct acpi_generic_status *)                         \
+       ((struct acpi_hest_generic_status *)                            \
         ((struct ghes_estatus_cache *)(estatus_cache) + 1))
 
 #define GHES_ESTATUS_NODE_LEN(estatus_len)                     \
        (sizeof(struct ghes_estatus_node) + (estatus_len))
 #define GHES_ESTATUS_FROM_NODE(estatus_node)                   \
-       ((struct acpi_generic_status *)                         \
+       ((struct acpi_hest_generic_status *)                            \
         ((struct ghes_estatus_node *)(estatus_node) + 1))
 
 bool ghes_disable;
@@ -408,7 +408,7 @@ static void ghes_clear_estatus(struct ghes *ghes)
        ghes->flags &= ~GHES_TO_CLEAR;
 }
 
-static void ghes_handle_memory_failure(struct acpi_generic_data *gdata, int sev)
+static void ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata, int sev)
 {
 #ifdef CONFIG_ACPI_APEI_MEMORY_FAILURE
        unsigned long pfn;
@@ -441,10 +441,10 @@ static void ghes_handle_memory_failure(struct acpi_generic_data *gdata, int sev)
 }
 
 static void ghes_do_proc(struct ghes *ghes,
-                        const struct acpi_generic_status *estatus)
+                        const struct acpi_hest_generic_status *estatus)
 {
        int sev, sec_sev;
-       struct acpi_generic_data *gdata;
+       struct acpi_hest_generic_data *gdata;
 
        sev = ghes_severity(estatus->error_severity);
        apei_estatus_for_each_section(estatus, gdata) {
@@ -498,7 +498,7 @@ static void ghes_do_proc(struct ghes *ghes,
 
 static void __ghes_print_estatus(const char *pfx,
                                 const struct acpi_hest_generic *generic,
-                                const struct acpi_generic_status *estatus)
+                                const struct acpi_hest_generic_status *estatus)
 {
        static atomic_t seqno;
        unsigned int curr_seqno;
@@ -520,7 +520,7 @@ static void __ghes_print_estatus(const char *pfx,
 
 static int ghes_print_estatus(const char *pfx,
                              const struct acpi_hest_generic *generic,
-                             const struct acpi_generic_status *estatus)
+                             const struct acpi_hest_generic_status *estatus)
 {
        /* Not more than 2 messages every 5 seconds */
        static DEFINE_RATELIMIT_STATE(ratelimit_corrected, 5*HZ, 2);
@@ -542,13 +542,13 @@ static int ghes_print_estatus(const char *pfx,
  * GHES error status reporting throttle, to report more kinds of
  * errors, instead of just most frequently occurred errors.
  */
-static int ghes_estatus_cached(struct acpi_generic_status *estatus)
+static int ghes_estatus_cached(struct acpi_hest_generic_status *estatus)
 {
        u32 len;
        int i, cached = 0;
        unsigned long long now;
        struct ghes_estatus_cache *cache;
-       struct acpi_generic_status *cache_estatus;
+       struct acpi_hest_generic_status *cache_estatus;
 
        len = cper_estatus_len(estatus);
        rcu_read_lock();
@@ -573,12 +573,12 @@ static int ghes_estatus_cached(struct acpi_generic_status *estatus)
 
 static struct ghes_estatus_cache *ghes_estatus_cache_alloc(
        struct acpi_hest_generic *generic,
-       struct acpi_generic_status *estatus)
+       struct acpi_hest_generic_status *estatus)
 {
        int alloced;
        u32 len, cache_len;
        struct ghes_estatus_cache *cache;
-       struct acpi_generic_status *cache_estatus;
+       struct acpi_hest_generic_status *cache_estatus;
 
        alloced = atomic_add_return(1, &ghes_estatus_cache_alloced);
        if (alloced > GHES_ESTATUS_CACHE_ALLOCED_MAX) {
@@ -621,7 +621,7 @@ static void ghes_estatus_cache_rcu_free(struct rcu_head *head)
 
 static void ghes_estatus_cache_add(
        struct acpi_hest_generic *generic,
-       struct acpi_generic_status *estatus)
+       struct acpi_hest_generic_status *estatus)
 {
        int i, slot = -1, count;
        unsigned long long now, duration, period, max_period = 0;
@@ -753,7 +753,7 @@ static void ghes_proc_in_irq(struct irq_work *irq_work)
        struct llist_node *llnode, *next;
        struct ghes_estatus_node *estatus_node;
        struct acpi_hest_generic *generic;
-       struct acpi_generic_status *estatus;
+       struct acpi_hest_generic_status *estatus;
        u32 len, node_len;
 
        llnode = llist_del_all(&ghes_estatus_llist);
@@ -786,7 +786,7 @@ static void ghes_print_queued_estatus(void)
        struct llist_node *llnode;
        struct ghes_estatus_node *estatus_node;
        struct acpi_hest_generic *generic;
-       struct acpi_generic_status *estatus;
+       struct acpi_hest_generic_status *estatus;
        u32 len, node_len;
 
        llnode = llist_del_all(&ghes_estatus_llist);
@@ -845,7 +845,7 @@ static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
 #ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
                u32 len, node_len;
                struct ghes_estatus_node *estatus_node;
-               struct acpi_generic_status *estatus;
+               struct acpi_hest_generic_status *estatus;
 #endif
                if (!(ghes->flags & GHES_TO_CLEAR))
                        continue;
@@ -925,7 +925,7 @@ static int ghes_probe(struct platform_device *ghes_dev)
 
        rc = -EIO;
        if (generic->error_block_length <
-           sizeof(struct acpi_generic_status)) {
+           sizeof(struct acpi_hest_generic_status)) {
                pr_warning(FW_BUG GHES_PFX "Invalid error block length: %u for generic hardware error source: %d\n",
                           generic->error_block_length,
                           generic->header.source_id);
index 3d8413d02a975f0643275a247524a0c9d7569341..36eb42e3b0bb80688a52d4748d19d762d777aec9 100644 (file)
@@ -247,75 +247,11 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
        },
 
        /*
-        * The following machines have broken backlight support when reporting
-        * the Windows 2012 OSI, so disable it until their support is fixed.
+        * These machines will power on immediately after shutdown when
+        * reporting the Windows 2012 OSI.
         */
        {
        .callback = dmi_disable_osi_win8,
-       .ident = "ASUS Zenbook Prime UX31A",
-       .matches = {
-                    DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-                    DMI_MATCH(DMI_PRODUCT_NAME, "UX31A"),
-               },
-       },
-       {
-       .callback = dmi_disable_osi_win8,
-       .ident = "ThinkPad Edge E530",
-       .matches = {
-                    DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-                    DMI_MATCH(DMI_PRODUCT_VERSION, "3259A2G"),
-               },
-       },
-       {
-       .callback = dmi_disable_osi_win8,
-       .ident = "ThinkPad Edge E530",
-       .matches = {
-                    DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-                    DMI_MATCH(DMI_PRODUCT_VERSION, "3259CTO"),
-               },
-       },
-       {
-       .callback = dmi_disable_osi_win8,
-       .ident = "ThinkPad Edge E530",
-       .matches = {
-                    DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-                    DMI_MATCH(DMI_PRODUCT_VERSION, "3259HJG"),
-               },
-       },
-       {
-       .callback = dmi_disable_osi_win8,
-       .ident = "Acer Aspire V5-573G",
-       .matches = {
-                    DMI_MATCH(DMI_SYS_VENDOR, "Acer Aspire"),
-                    DMI_MATCH(DMI_PRODUCT_VERSION, "V5-573G/Dazzle_HW"),
-               },
-       },
-       {
-       .callback = dmi_disable_osi_win8,
-       .ident = "Acer Aspire V5-572G",
-       .matches = {
-                    DMI_MATCH(DMI_SYS_VENDOR, "Acer Aspire"),
-                    DMI_MATCH(DMI_PRODUCT_VERSION, "V5-572G/Dazzle_CX"),
-               },
-       },
-       {
-       .callback = dmi_disable_osi_win8,
-       .ident = "ThinkPad T431s",
-       .matches = {
-                    DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-                    DMI_MATCH(DMI_PRODUCT_VERSION, "20AACTO1WW"),
-               },
-       },
-       {
-       .callback = dmi_disable_osi_win8,
-       .ident = "ThinkPad T430",
-       .matches = {
-                    DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-                    DMI_MATCH(DMI_PRODUCT_VERSION, "2349D15"),
-               },
-       },
-       {
-       .callback = dmi_disable_osi_win8,
        .ident = "Dell Inspiron 7737",
        .matches = {
                    DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
index c5bc8cfe09fa6d2beba1c4ac9ea490314f3e08f7..8581f5b84f48b6d2b66fa4b1f7c120f6b0928bf6 100644 (file)
@@ -477,9 +477,6 @@ static int __init acpi_bus_init_irq(void)
        return 0;
 }
 
-u8 acpi_gbl_permanent_mmap;
-
-
 void __init acpi_early_init(void)
 {
        acpi_status status;
index db35594d4df7a072a76e0c2fb879c324efdcedb8..6d5d1832a5880e0badf4220d7fb7e89008d5f4cb 100644 (file)
@@ -79,11 +79,13 @@ static int acpi_button_remove(struct acpi_device *device);
 static void acpi_button_notify(struct acpi_device *device, u32 event);
 
 #ifdef CONFIG_PM_SLEEP
+static int acpi_button_suspend(struct device *dev);
 static int acpi_button_resume(struct device *dev);
 #else
+#define acpi_button_suspend NULL
 #define acpi_button_resume NULL
 #endif
-static SIMPLE_DEV_PM_OPS(acpi_button_pm, NULL, acpi_button_resume);
+static SIMPLE_DEV_PM_OPS(acpi_button_pm, acpi_button_suspend, acpi_button_resume);
 
 static struct acpi_driver acpi_button_driver = {
        .name = "button",
@@ -102,6 +104,7 @@ struct acpi_button {
        struct input_dev *input;
        char phys[32];                  /* for input device */
        unsigned long pushed;
+       bool suspended;
 };
 
 static BLOCKING_NOTIFIER_HEAD(acpi_lid_notifier);
@@ -293,15 +296,19 @@ static void acpi_button_notify(struct acpi_device *device, u32 event)
                if (button->type == ACPI_BUTTON_TYPE_LID) {
                        acpi_lid_send_state(device);
                } else {
-                       int keycode = test_bit(KEY_SLEEP, input->keybit) ?
-                                               KEY_SLEEP : KEY_POWER;
+                       int keycode;
+
+                       pm_wakeup_event(&device->dev, 0);
+                       if (button->suspended)
+                               break;
 
+                       keycode = test_bit(KEY_SLEEP, input->keybit) ?
+                                               KEY_SLEEP : KEY_POWER;
                        input_report_key(input, keycode, 1);
                        input_sync(input);
                        input_report_key(input, keycode, 0);
                        input_sync(input);
 
-                       pm_wakeup_event(&device->dev, 0);
                        acpi_bus_generate_netlink_event(
                                        device->pnp.device_class,
                                        dev_name(&device->dev),
@@ -316,11 +323,21 @@ static void acpi_button_notify(struct acpi_device *device, u32 event)
 }
 
 #ifdef CONFIG_PM_SLEEP
+static int acpi_button_suspend(struct device *dev)
+{
+       struct acpi_device *device = to_acpi_device(dev);
+       struct acpi_button *button = acpi_driver_data(device);
+
+       button->suspended = true;
+       return 0;
+}
+
 static int acpi_button_resume(struct device *dev)
 {
        struct acpi_device *device = to_acpi_device(dev);
        struct acpi_button *button = acpi_driver_data(device);
 
+       button->suspended = false;
        if (button->type == ACPI_BUTTON_TYPE_LID)
                return acpi_lid_send_state(device);
        return 0;
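
The new suspended flag changes the ordering in the button notify path above: the wakeup event is reported first, and while the flag is set the handler returns early so no KEY_POWER/KEY_SLEEP press is injected for the event that merely woke the machine. A user-space model of that control flow, with invented names, just to show the ordering:

#include <stdbool.h>
#include <stdio.h>

struct button {
        bool suspended;
        int wakeup_events;
        int key_presses;
};

static void notify(struct button *b)
{
        b->wakeup_events++;             /* pm_wakeup_event() always runs */
        if (b->suspended)
                return;                 /* no input injection while suspended */
        b->key_presses++;               /* input_report_key(KEY_POWER, ...) */
}

int main(void)
{
        struct button b = { 0 };

        b.suspended = true;             /* acpi_button_suspend() */
        notify(&b);                     /* press arriving during system sleep */
        b.suspended = false;            /* acpi_button_resume() */
        notify(&b);                     /* normal press */

        printf("wakeups=%d presses=%d\n", b.wakeup_events, b.key_presses);
        return 0;                       /* prints: wakeups=2 presses=1 */
}
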
index 49a51277f81d17dad2d612cba4d3efe3f7b18bf2..67075f800e34cb69b3a28d852b9bc15b00ffa702 100644 (file)
@@ -367,29 +367,61 @@ EXPORT_SYMBOL(acpi_bus_power_manageable);
 #ifdef CONFIG_PM
 static DEFINE_MUTEX(acpi_pm_notifier_lock);
 
+static void acpi_pm_notify_handler(acpi_handle handle, u32 val, void *not_used)
+{
+       struct acpi_device *adev;
+
+       if (val != ACPI_NOTIFY_DEVICE_WAKE)
+               return;
+
+       adev = acpi_bus_get_acpi_device(handle);
+       if (!adev)
+               return;
+
+       mutex_lock(&acpi_pm_notifier_lock);
+
+       if (adev->wakeup.flags.notifier_present) {
+               __pm_wakeup_event(adev->wakeup.ws, 0);
+               if (adev->wakeup.context.work.func)
+                       queue_pm_work(&adev->wakeup.context.work);
+       }
+
+       mutex_unlock(&acpi_pm_notifier_lock);
+
+       acpi_bus_put_acpi_device(adev);
+}
+
 /**
- * acpi_add_pm_notifier - Register PM notifier for given ACPI device.
- * @adev: ACPI device to add the notifier for.
- * @context: Context information to pass to the notifier routine.
+ * acpi_add_pm_notifier - Register PM notify handler for given ACPI device.
+ * @adev: ACPI device to add the notify handler for.
+ * @dev: Device to generate a wakeup event for while handling the notification.
+ * @work_func: Work function to execute when handling the notification.
  *
  * NOTE: @adev need not be a run-wake or wakeup device to be a valid source of
  * PM wakeup events.  For example, wakeup events may be generated for bridges
  * if one of the devices below the bridge is signaling wakeup, even if the
  * bridge itself doesn't have a wakeup GPE associated with it.
  */
-acpi_status acpi_add_pm_notifier(struct acpi_device *adev,
-                                acpi_notify_handler handler, void *context)
+acpi_status acpi_add_pm_notifier(struct acpi_device *adev, struct device *dev,
+                                void (*work_func)(struct work_struct *work))
 {
        acpi_status status = AE_ALREADY_EXISTS;
 
+       if (!dev && !work_func)
+               return AE_BAD_PARAMETER;
+
        mutex_lock(&acpi_pm_notifier_lock);
 
        if (adev->wakeup.flags.notifier_present)
                goto out;
 
-       status = acpi_install_notify_handler(adev->handle,
-                                            ACPI_SYSTEM_NOTIFY,
-                                            handler, context);
+       adev->wakeup.ws = wakeup_source_register(dev_name(&adev->dev));
+       adev->wakeup.context.dev = dev;
+       if (work_func)
+               INIT_WORK(&adev->wakeup.context.work, work_func);
+
+       status = acpi_install_notify_handler(adev->handle, ACPI_SYSTEM_NOTIFY,
+                                            acpi_pm_notify_handler, NULL);
        if (ACPI_FAILURE(status))
                goto out;
 
@@ -404,8 +436,7 @@ acpi_status acpi_add_pm_notifier(struct acpi_device *adev,
  * acpi_remove_pm_notifier - Unregister PM notifier from given ACPI device.
  * @adev: ACPI device to remove the notifier from.
  */
-acpi_status acpi_remove_pm_notifier(struct acpi_device *adev,
-                                   acpi_notify_handler handler)
+acpi_status acpi_remove_pm_notifier(struct acpi_device *adev)
 {
        acpi_status status = AE_BAD_PARAMETER;
 
@@ -416,10 +447,17 @@ acpi_status acpi_remove_pm_notifier(struct acpi_device *adev,
 
        status = acpi_remove_notify_handler(adev->handle,
                                            ACPI_SYSTEM_NOTIFY,
-                                           handler);
+                                           acpi_pm_notify_handler);
        if (ACPI_FAILURE(status))
                goto out;
 
+       if (adev->wakeup.context.work.func) {
+               cancel_work_sync(&adev->wakeup.context.work);
+               adev->wakeup.context.work.func = NULL;
+       }
+       adev->wakeup.context.dev = NULL;
+       wakeup_source_unregister(adev->wakeup.ws);
+
        adev->wakeup.flags.notifier_present = false;
 
  out:
@@ -558,7 +596,6 @@ static int acpi_dev_pm_get_state(struct device *dev, struct acpi_device *adev,
  */
 int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p, int d_max_in)
 {
-       acpi_handle handle = ACPI_HANDLE(dev);
        struct acpi_device *adev;
        int ret, d_min, d_max;
 
@@ -573,8 +610,9 @@ int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p, int d_max_in)
                        d_max_in = ACPI_STATE_D3_HOT;
        }
 
-       if (!handle || acpi_bus_get_device(handle, &adev)) {
-               dev_dbg(dev, "ACPI handle without context in %s!\n", __func__);
+       adev = ACPI_COMPANION(dev);
+       if (!adev) {
+               dev_dbg(dev, "ACPI companion missing in %s!\n", __func__);
                return -ENODEV;
        }
 
@@ -600,26 +638,25 @@ int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p, int d_max_in)
 }
 EXPORT_SYMBOL(acpi_pm_device_sleep_state);
 
-#ifdef CONFIG_PM_RUNTIME
 /**
- * acpi_wakeup_device - Wakeup notification handler for ACPI devices.
- * @handle: ACPI handle of the device the notification is for.
- * @event: Type of the signaled event.
- * @context: Device corresponding to @handle.
+ * acpi_pm_notify_work_func - ACPI devices wakeup notification work function.
+ * @work: Work item to handle.
  */
-static void acpi_wakeup_device(acpi_handle handle, u32 event, void *context)
+static void acpi_pm_notify_work_func(struct work_struct *work)
 {
-       struct device *dev = context;
+       struct device *dev;
 
-       if (event == ACPI_NOTIFY_DEVICE_WAKE && dev) {
+       dev = container_of(work, struct acpi_device_wakeup_context, work)->dev;
+       if (dev) {
                pm_wakeup_event(dev, 0);
                pm_runtime_resume(dev);
        }
 }
 
 /**
- * __acpi_device_run_wake - Enable/disable runtime remote wakeup for device.
- * @adev: ACPI device to enable/disable the remote wakeup for.
+ * acpi_device_wakeup - Enable/disable wakeup functionality for device.
+ * @adev: ACPI device to enable/disable wakeup functionality for.
+ * @target_state: State the system is transitioning into.
  * @enable: Whether to enable or disable the wakeup functionality.
  *
  * Enable/disable the GPE associated with @adev so that it can generate
@@ -629,7 +666,8 @@ static void acpi_wakeup_device(acpi_handle handle, u32 event, void *context)
  * Callers must ensure that @adev is a valid ACPI device node before executing
  * this function.
  */
-int __acpi_device_run_wake(struct acpi_device *adev, bool enable)
+static int acpi_device_wakeup(struct acpi_device *adev, u32 target_state,
+                             bool enable)
 {
        struct acpi_device_wakeup *wakeup = &adev->wakeup;
 
@@ -637,7 +675,7 @@ int __acpi_device_run_wake(struct acpi_device *adev, bool enable)
                acpi_status res;
                int error;
 
-               error = acpi_enable_wakeup_device_power(adev, ACPI_STATE_S0);
+               error = acpi_enable_wakeup_device_power(adev, target_state);
                if (error)
                        return error;
 
@@ -653,6 +691,7 @@ int __acpi_device_run_wake(struct acpi_device *adev, bool enable)
        return 0;
 }
 
+#ifdef CONFIG_PM_RUNTIME
 /**
  * acpi_pm_device_run_wake - Enable/disable remote wakeup for given device.
  * @dev: Device to enable/disable the platform to wake up.
@@ -661,41 +700,22 @@ int __acpi_device_run_wake(struct acpi_device *adev, bool enable)
 int acpi_pm_device_run_wake(struct device *phys_dev, bool enable)
 {
        struct acpi_device *adev;
-       acpi_handle handle;
 
        if (!device_run_wake(phys_dev))
                return -EINVAL;
 
-       handle = ACPI_HANDLE(phys_dev);
-       if (!handle || acpi_bus_get_device(handle, &adev)) {
-               dev_dbg(phys_dev, "ACPI handle without context in %s!\n",
-                       __func__);
+       adev = ACPI_COMPANION(phys_dev);
+       if (!adev) {
+               dev_dbg(phys_dev, "ACPI companion missing in %s!\n", __func__);
                return -ENODEV;
        }
 
-       return __acpi_device_run_wake(adev, enable);
+       return acpi_device_wakeup(adev, ACPI_STATE_S0, enable);
 }
 EXPORT_SYMBOL(acpi_pm_device_run_wake);
-#else
-static inline void acpi_wakeup_device(acpi_handle handle, u32 event,
-                                     void *context) {}
 #endif /* CONFIG_PM_RUNTIME */
 
 #ifdef CONFIG_PM_SLEEP
-/**
- * __acpi_device_sleep_wake - Enable or disable device to wake up the system.
- * @dev: Device to enable/desible to wake up the system.
- * @target_state: System state the device is supposed to wake up from.
- * @enable: Whether to enable or disable @dev to wake up the system.
- */
-int __acpi_device_sleep_wake(struct acpi_device *adev, u32 target_state,
-                            bool enable)
-{
-       return enable ?
-               acpi_enable_wakeup_device_power(adev, target_state) :
-               acpi_disable_wakeup_device_power(adev);
-}
-
 /**
  * acpi_pm_device_sleep_wake - Enable or disable device to wake up the system.
  * @dev: Device to enable/disable to wake up the system from sleep states.
@@ -703,21 +723,19 @@ int __acpi_device_sleep_wake(struct acpi_device *adev, u32 target_state,
  */
 int acpi_pm_device_sleep_wake(struct device *dev, bool enable)
 {
-       acpi_handle handle;
        struct acpi_device *adev;
        int error;
 
        if (!device_can_wakeup(dev))
                return -EINVAL;
 
-       handle = ACPI_HANDLE(dev);
-       if (!handle || acpi_bus_get_device(handle, &adev)) {
-               dev_dbg(dev, "ACPI handle without context in %s!\n", __func__);
+       adev = ACPI_COMPANION(dev);
+       if (!adev) {
+               dev_dbg(dev, "ACPI companion missing in %s!\n", __func__);
                return -ENODEV;
        }
 
-       error = __acpi_device_sleep_wake(adev, acpi_target_system_state(),
-                                        enable);
+       error = acpi_device_wakeup(adev, acpi_target_system_state(), enable);
        if (!error)
                dev_info(dev, "System wakeup %s by ACPI\n",
                                enable ? "enabled" : "disabled");
@@ -775,13 +793,13 @@ int acpi_dev_runtime_suspend(struct device *dev)
 
        remote_wakeup = dev_pm_qos_flags(dev, PM_QOS_FLAG_REMOTE_WAKEUP) >
                                PM_QOS_FLAGS_NONE;
-       error = __acpi_device_run_wake(adev, remote_wakeup);
+       error = acpi_device_wakeup(adev, ACPI_STATE_S0, remote_wakeup);
        if (remote_wakeup && error)
                return -EAGAIN;
 
        error = acpi_dev_pm_low_power(dev, adev, ACPI_STATE_S0);
        if (error)
-               __acpi_device_run_wake(adev, false);
+               acpi_device_wakeup(adev, ACPI_STATE_S0, false);
 
        return error;
 }
@@ -804,7 +822,7 @@ int acpi_dev_runtime_resume(struct device *dev)
                return 0;
 
        error = acpi_dev_pm_full_power(adev);
-       __acpi_device_run_wake(adev, false);
+       acpi_device_wakeup(adev, ACPI_STATE_S0, false);
        return error;
 }
 EXPORT_SYMBOL_GPL(acpi_dev_runtime_resume);
@@ -860,13 +878,13 @@ int acpi_dev_suspend_late(struct device *dev)
 
        target_state = acpi_target_system_state();
        wakeup = device_may_wakeup(dev);
-       error = __acpi_device_sleep_wake(adev, target_state, wakeup);
+       error = acpi_device_wakeup(adev, target_state, wakeup);
        if (wakeup && error)
                return error;
 
        error = acpi_dev_pm_low_power(dev, adev, target_state);
        if (error)
-               __acpi_device_sleep_wake(adev, ACPI_STATE_UNKNOWN, false);
+               acpi_device_wakeup(adev, ACPI_STATE_UNKNOWN, false);
 
        return error;
 }
@@ -889,7 +907,7 @@ int acpi_dev_resume_early(struct device *dev)
                return 0;
 
        error = acpi_dev_pm_full_power(adev);
-       __acpi_device_sleep_wake(adev, ACPI_STATE_UNKNOWN, false);
+       acpi_device_wakeup(adev, ACPI_STATE_UNKNOWN, false);
        return error;
 }
 EXPORT_SYMBOL_GPL(acpi_dev_resume_early);
@@ -1048,11 +1066,11 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
        if (dev->pm_domain)
                return -EEXIST;
 
-       acpi_add_pm_notifier(adev, acpi_wakeup_device, dev);
+       acpi_add_pm_notifier(adev, dev, acpi_pm_notify_work_func);
        dev->pm_domain = &acpi_general_pm_domain;
        if (power_on) {
                acpi_dev_pm_full_power(adev);
-               __acpi_device_run_wake(adev, false);
+               acpi_device_wakeup(adev, ACPI_STATE_S0, false);
        }
        return 0;
 }
@@ -1076,7 +1094,7 @@ void acpi_dev_pm_detach(struct device *dev, bool power_off)
 
        if (adev && dev->pm_domain == &acpi_general_pm_domain) {
                dev->pm_domain = NULL;
-               acpi_remove_pm_notifier(adev, acpi_wakeup_device);
+               acpi_remove_pm_notifier(adev);
                if (power_off) {
                        /*
                         * If the device's PM QoS resume latency limit or flags
@@ -1086,7 +1104,7 @@ void acpi_dev_pm_detach(struct device *dev, bool power_off)
                         */
                        dev_pm_qos_hide_latency_limit(dev);
                        dev_pm_qos_hide_flags(dev);
-                       __acpi_device_run_wake(adev, false);
+                       acpi_device_wakeup(adev, ACPI_STATE_S0, false);
                        acpi_dev_pm_low_power(dev, adev, ACPI_STATE_S0);
                }
        }
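
The reworked acpi_add_pm_notifier() above splits wakeup handling in two: the ACPI notify handler only records a wakeup event and queues a work item, and the work item, running later in process context, is what actually resumes the device. A user-space model of that hand-off (invented names, not the kernel workqueue API):

#include <stdio.h>
#include <stdbool.h>

struct wakeup_context {                 /* loosely mirrors acpi_device_wakeup_context */
        void (*work_func)(struct wakeup_context *ctx);
        const char *dev_name;
        bool work_queued;
};

static void notify_work_func(struct wakeup_context *ctx)
{
        /* process context: safe to resume the device here */
        printf("runtime-resuming %s\n", ctx->dev_name);
}

static void notify_handler(struct wakeup_context *ctx)
{
        /* keep the notify path short: just flag the wakeup and queue work */
        if (ctx->work_func)
                ctx->work_queued = true;
}

int main(void)
{
        struct wakeup_context ctx = {
                .work_func = notify_work_func,
                .dev_name = "dev0",
        };

        notify_handler(&ctx);           /* ACPI_NOTIFY_DEVICE_WAKE arrives */

        if (ctx.work_queued) {          /* the workqueue runs the item later */
                ctx.work_queued = false;
                ctx.work_func(&ctx);
        }
        return 0;
}
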
index 7de5b603f272b204305899e0147fce9b7c4042a9..4c5cf77e7576ea10b9e11a07b7fc5a9adbb60214 100644 (file)
@@ -84,8 +84,6 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
                             int type, unsigned long long sta);
 void acpi_device_add_finalize(struct acpi_device *device);
 void acpi_free_pnp_ids(struct acpi_device_pnp *pnp);
-int acpi_bind_one(struct device *dev, struct acpi_device *adev);
-int acpi_unbind_one(struct device *dev);
 bool acpi_device_is_present(struct acpi_device *adev);
 bool acpi_device_is_battery(struct acpi_device *adev);
 
@@ -108,7 +106,12 @@ int acpi_power_transition(struct acpi_device *device, int state);
 int acpi_device_update_power(struct acpi_device *device, int *state_p);
 
 int acpi_wakeup_device_init(void);
+
+#ifdef CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC
 void acpi_early_processor_set_pdc(void);
+#else
+static inline void acpi_early_processor_set_pdc(void) {}
+#endif
 
 /* --------------------------------------------------------------------------
                                   Embedded Controller
index bad25b070fe0bfe8b0204cce2ddd5ce30f07ec91..3abe9b223ba717a644ecfd0a4edf401004f24807 100644 (file)
@@ -259,12 +259,14 @@ acpi_physical_address __init acpi_os_get_root_pointer(void)
                               "System description tables not found\n");
                        return 0;
                }
-       } else {
+       } else if (IS_ENABLED(CONFIG_ACPI_LEGACY_TABLES_LOOKUP)) {
                acpi_physical_address pa = 0;
 
                acpi_find_root_pointer(&pa);
                return pa;
        }
+
+       return 0;
 }
 
 /* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
index d388f13d48b43634acaaddaf526f037051db78c0..e6ae603ed1a18594b2d4766371d748d5a0cfedc3 100644 (file)
@@ -593,7 +593,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
        if (no_aspm)
                pcie_no_aspm();
 
-       pci_acpi_add_bus_pm_notifier(device, root->bus);
+       pci_acpi_add_bus_pm_notifier(device);
        if (device->wakeup.flags.run_wake)
                device_set_run_wake(root->bus->bridge, true);
 
index 71e2065639a6bcc74134a4447c803cbfac1c7841..00f48d13a51630df09afced52ac4c77abdec5240 100644 (file)
@@ -4,17 +4,11 @@
  *
  *     Alex Chiang <achiang@hp.com>
  *     - Unified x86/ia64 implementations
- *     Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
- *     - Added _PDC for platforms with Intel CPUs
  */
 #include <linux/export.h>
-#include <linux/dmi.h>
-#include <linux/slab.h>
 #include <linux/acpi.h>
 #include <acpi/processor.h>
 
-#include "internal.h"
-
 #define _COMPONENT             ACPI_PROCESSOR_COMPONENT
 ACPI_MODULE_NAME("processor_core");
 
@@ -208,195 +202,3 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
        return acpi_map_cpuid(apic_id, acpi_id);
 }
 EXPORT_SYMBOL_GPL(acpi_get_cpuid);
-
-static bool __init processor_physically_present(acpi_handle handle)
-{
-       int cpuid, type;
-       u32 acpi_id;
-       acpi_status status;
-       acpi_object_type acpi_type;
-       unsigned long long tmp;
-       union acpi_object object = { 0 };
-       struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
-
-       status = acpi_get_type(handle, &acpi_type);
-       if (ACPI_FAILURE(status))
-               return false;
-
-       switch (acpi_type) {
-       case ACPI_TYPE_PROCESSOR:
-               status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
-               if (ACPI_FAILURE(status))
-                       return false;
-               acpi_id = object.processor.proc_id;
-               break;
-       case ACPI_TYPE_DEVICE:
-               status = acpi_evaluate_integer(handle, "_UID", NULL, &tmp);
-               if (ACPI_FAILURE(status))
-                       return false;
-               acpi_id = tmp;
-               break;
-       default:
-               return false;
-       }
-
-       type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0;
-       cpuid = acpi_get_cpuid(handle, type, acpi_id);
-
-       if (cpuid == -1)
-               return false;
-
-       return true;
-}
-
-static void acpi_set_pdc_bits(u32 *buf)
-{
-       buf[0] = ACPI_PDC_REVISION_ID;
-       buf[1] = 1;
-
-       /* Enable coordination with firmware's _TSD info */
-       buf[2] = ACPI_PDC_SMP_T_SWCOORD;
-
-       /* Twiddle arch-specific bits needed for _PDC */
-       arch_acpi_set_pdc_bits(buf);
-}
-
-static struct acpi_object_list *acpi_processor_alloc_pdc(void)
-{
-       struct acpi_object_list *obj_list;
-       union acpi_object *obj;
-       u32 *buf;
-
-       /* allocate and initialize pdc. It will be used later. */
-       obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
-       if (!obj_list) {
-               printk(KERN_ERR "Memory allocation error\n");
-               return NULL;
-       }
-
-       obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
-       if (!obj) {
-               printk(KERN_ERR "Memory allocation error\n");
-               kfree(obj_list);
-               return NULL;
-       }
-
-       buf = kmalloc(12, GFP_KERNEL);
-       if (!buf) {
-               printk(KERN_ERR "Memory allocation error\n");
-               kfree(obj);
-               kfree(obj_list);
-               return NULL;
-       }
-
-       acpi_set_pdc_bits(buf);
-
-       obj->type = ACPI_TYPE_BUFFER;
-       obj->buffer.length = 12;
-       obj->buffer.pointer = (u8 *) buf;
-       obj_list->count = 1;
-       obj_list->pointer = obj;
-
-       return obj_list;
-}
-
-/*
- * _PDC is required for a BIOS-OS handshake for most of the newer
- * ACPI processor features.
- */
-static acpi_status
-acpi_processor_eval_pdc(acpi_handle handle, struct acpi_object_list *pdc_in)
-{
-       acpi_status status = AE_OK;
-
-       if (boot_option_idle_override == IDLE_NOMWAIT) {
-               /*
-                * If mwait is disabled for CPU C-states, the C2C3_FFH access
-                * mode will be disabled in the parameter of _PDC object.
-                * Of course C1_FFH access mode will also be disabled.
-                */
-               union acpi_object *obj;
-               u32 *buffer = NULL;
-
-               obj = pdc_in->pointer;
-               buffer = (u32 *)(obj->buffer.pointer);
-               buffer[2] &= ~(ACPI_PDC_C_C2C3_FFH | ACPI_PDC_C_C1_FFH);
-
-       }
-       status = acpi_evaluate_object(handle, "_PDC", pdc_in, NULL);
-
-       if (ACPI_FAILURE(status))
-               ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-                   "Could not evaluate _PDC, using legacy perf. control.\n"));
-
-       return status;
-}
-
-void acpi_processor_set_pdc(acpi_handle handle)
-{
-       struct acpi_object_list *obj_list;
-
-       if (arch_has_acpi_pdc() == false)
-               return;
-
-       obj_list = acpi_processor_alloc_pdc();
-       if (!obj_list)
-               return;
-
-       acpi_processor_eval_pdc(handle, obj_list);
-
-       kfree(obj_list->pointer->buffer.pointer);
-       kfree(obj_list->pointer);
-       kfree(obj_list);
-}
-
-static acpi_status __init
-early_init_pdc(acpi_handle handle, u32 lvl, void *context, void **rv)
-{
-       if (processor_physically_present(handle) == false)
-               return AE_OK;
-
-       acpi_processor_set_pdc(handle);
-       return AE_OK;
-}
-
-#if defined(CONFIG_X86) || defined(CONFIG_IA64)
-static int __init set_no_mwait(const struct dmi_system_id *id)
-{
-       pr_notice(PREFIX "%s detected - disabling mwait for CPU C-states\n",
-                 id->ident);
-       boot_option_idle_override = IDLE_NOMWAIT;
-       return 0;
-}
-
-static struct dmi_system_id processor_idle_dmi_table[] __initdata = {
-       {
-       set_no_mwait, "Extensa 5220", {
-       DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
-       DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
-       DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
-       DMI_MATCH(DMI_BOARD_NAME, "Columbia") }, NULL},
-       {},
-};
-
-static void __init processor_dmi_check(void)
-{
-       /*
-        * Check whether the system is DMI table. If yes, OSPM
-        * should not use mwait for CPU-states.
-        */
-       dmi_check_system(processor_idle_dmi_table);
-}
-#else
-static inline void processor_dmi_check(void) {}
-#endif
-
-void __init acpi_early_processor_set_pdc(void)
-{
-       processor_dmi_check();
-
-       acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
-                           ACPI_UINT32_MAX,
-                           early_init_pdc, NULL, NULL, NULL);
-       acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID, early_init_pdc, NULL, NULL);
-}
diff --git a/drivers/acpi/processor_pdc.c b/drivers/acpi/processor_pdc.c
new file mode 100644 (file)
index 0000000..e5dd808
--- /dev/null
@@ -0,0 +1,206 @@
+/*
+ * Copyright (C) 2005 Intel Corporation
+ * Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
+ *
+ *      Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
+ *      - Added _PDC for platforms with Intel CPUs
+ */
+
+#define pr_fmt(fmt) "ACPI: " fmt
+
+#include <linux/dmi.h>
+#include <linux/slab.h>
+#include <linux/acpi.h>
+#include <acpi/processor.h>
+
+#include "internal.h"
+
+#define _COMPONENT              ACPI_PROCESSOR_COMPONENT
+ACPI_MODULE_NAME("processor_pdc");
+
+static bool __init processor_physically_present(acpi_handle handle)
+{
+       int cpuid, type;
+       u32 acpi_id;
+       acpi_status status;
+       acpi_object_type acpi_type;
+       unsigned long long tmp;
+       union acpi_object object = { 0 };
+       struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
+
+       status = acpi_get_type(handle, &acpi_type);
+       if (ACPI_FAILURE(status))
+               return false;
+
+       switch (acpi_type) {
+       case ACPI_TYPE_PROCESSOR:
+               status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
+               if (ACPI_FAILURE(status))
+                       return false;
+               acpi_id = object.processor.proc_id;
+               break;
+       case ACPI_TYPE_DEVICE:
+               status = acpi_evaluate_integer(handle, "_UID", NULL, &tmp);
+               if (ACPI_FAILURE(status))
+                       return false;
+               acpi_id = tmp;
+               break;
+       default:
+               return false;
+       }
+
+       type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0;
+       cpuid = acpi_get_cpuid(handle, type, acpi_id);
+
+       if (cpuid == -1)
+               return false;
+
+       return true;
+}
+
+static void acpi_set_pdc_bits(u32 *buf)
+{
+       buf[0] = ACPI_PDC_REVISION_ID;
+       buf[1] = 1;
+
+       /* Enable coordination with firmware's _TSD info */
+       buf[2] = ACPI_PDC_SMP_T_SWCOORD;
+
+       /* Twiddle arch-specific bits needed for _PDC */
+       arch_acpi_set_pdc_bits(buf);
+}
+
+static struct acpi_object_list *acpi_processor_alloc_pdc(void)
+{
+       struct acpi_object_list *obj_list;
+       union acpi_object *obj;
+       u32 *buf;
+
+       /* allocate and initialize pdc. It will be used later. */
+       obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
+       if (!obj_list)
+               goto out;
+
+       obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
+       if (!obj) {
+               kfree(obj_list);
+               goto out;
+       }
+
+       buf = kmalloc(12, GFP_KERNEL);
+       if (!buf) {
+               kfree(obj);
+               kfree(obj_list);
+               goto out;
+       }
+
+       acpi_set_pdc_bits(buf);
+
+       obj->type = ACPI_TYPE_BUFFER;
+       obj->buffer.length = 12;
+       obj->buffer.pointer = (u8 *) buf;
+       obj_list->count = 1;
+       obj_list->pointer = obj;
+
+       return obj_list;
+out:
+       pr_err("Memory allocation error\n");
+       return NULL;
+}
+
+/*
+ * _PDC is required for a BIOS-OS handshake for most of the newer
+ * ACPI processor features.
+ */
+static acpi_status
+acpi_processor_eval_pdc(acpi_handle handle, struct acpi_object_list *pdc_in)
+{
+       acpi_status status = AE_OK;
+
+       if (boot_option_idle_override == IDLE_NOMWAIT) {
+               /*
+                * If mwait is disabled for CPU C-states, the C2C3_FFH access
+                * mode will be disabled in the parameter of _PDC object.
+                * Of course C1_FFH access mode will also be disabled.
+                */
+               union acpi_object *obj;
+               u32 *buffer = NULL;
+
+               obj = pdc_in->pointer;
+               buffer = (u32 *)(obj->buffer.pointer);
+               buffer[2] &= ~(ACPI_PDC_C_C2C3_FFH | ACPI_PDC_C_C1_FFH);
+
+       }
+       status = acpi_evaluate_object(handle, "_PDC", pdc_in, NULL);
+
+       if (ACPI_FAILURE(status))
+               ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+                   "Could not evaluate _PDC, using legacy perf. control.\n"));
+
+       return status;
+}
+
+void acpi_processor_set_pdc(acpi_handle handle)
+{
+       struct acpi_object_list *obj_list;
+
+       if (arch_has_acpi_pdc() == false)
+               return;
+
+       obj_list = acpi_processor_alloc_pdc();
+       if (!obj_list)
+               return;
+
+       acpi_processor_eval_pdc(handle, obj_list);
+
+       kfree(obj_list->pointer->buffer.pointer);
+       kfree(obj_list->pointer);
+       kfree(obj_list);
+}
+
+static acpi_status __init
+early_init_pdc(acpi_handle handle, u32 lvl, void *context, void **rv)
+{
+       if (processor_physically_present(handle) == false)
+               return AE_OK;
+
+       acpi_processor_set_pdc(handle);
+       return AE_OK;
+}
+
+static int __init set_no_mwait(const struct dmi_system_id *id)
+{
+       pr_notice("%s detected - disabling mwait for CPU C-states\n",
+                 id->ident);
+       boot_option_idle_override = IDLE_NOMWAIT;
+       return 0;
+}
+
+static struct dmi_system_id processor_idle_dmi_table[] __initdata = {
+       {
+       set_no_mwait, "Extensa 5220", {
+       DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
+       DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+       DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
+       DMI_MATCH(DMI_BOARD_NAME, "Columbia") }, NULL},
+       {},
+};
+
+static void __init processor_dmi_check(void)
+{
+       /*
+        * Check whether the system matches an entry in the DMI quirk table.
+        * If so, OSPM should not use mwait for CPU C-states.
+        */
+       dmi_check_system(processor_idle_dmi_table);
+}
+
+void __init acpi_early_processor_set_pdc(void)
+{
+       processor_dmi_check();
+
+       acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
+                           ACPI_UINT32_MAX,
+                           early_init_pdc, NULL, NULL, NULL);
+       acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID, early_init_pdc, NULL, NULL);
+}
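
The _PDC argument built in processor_pdc.c above is a single ACPI buffer of three 32-bit words: a revision, a count of capability words, and the capability bits themselves, with the C1/C2/C3 FFH bits cleared when mwait is disabled. A stand-alone sketch of that layout; the bit values below are illustrative stand-ins, the authoritative definitions live in the kernel's pdc_intel.h header:

#include <stdio.h>

#define PDC_REVISION_ID   0x1
#define PDC_SMP_T_SWCOORD (1u << 7)     /* stand-in for ACPI_PDC_SMP_T_SWCOORD */
#define PDC_C_C1_FFH      (1u << 8)     /* stand-in for ACPI_PDC_C_C1_FFH */
#define PDC_C_C2C3_FFH    (1u << 9)     /* stand-in for ACPI_PDC_C_C2C3_FFH */

int main(void)
{
        unsigned int buf[3];
        int nomwait = 1;                /* boot_option_idle_override == IDLE_NOMWAIT */

        buf[0] = PDC_REVISION_ID;       /* revision */
        buf[1] = 1;                     /* one capability dword follows */
        buf[2] = PDC_SMP_T_SWCOORD;     /* plus arch-specific bits in the real code */

        if (nomwait)                    /* the acpi_processor_eval_pdc() path */
                buf[2] &= ~(PDC_C_C2C3_FFH | PDC_C_C1_FFH);

        printf("_PDC capabilities: 0x%08x\n", buf[2]);
        return 0;
}
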
index 3ffc9306538bca385d0a7382e94438d27f9d0cf3..5d592e17d760ebe940f3a64d9c6a30f5d8f158b8 100644 (file)
@@ -1423,14 +1423,13 @@ static int acpi_bus_extract_wakeup_device_power_package(acpi_handle handle,
                        wakeup->sleep_state = sleep_state;
                }
        }
-       acpi_setup_gpe_for_wake(handle, wakeup->gpe_device, wakeup->gpe_number);
 
  out:
        kfree(buffer.pointer);
        return err;
 }
 
-static void acpi_bus_set_run_wake_flags(struct acpi_device *device)
+static void acpi_wakeup_gpe_init(struct acpi_device *device)
 {
        struct acpi_device_id button_device_ids[] = {
                {"PNP0C0C", 0},
@@ -1438,29 +1437,33 @@ static void acpi_bus_set_run_wake_flags(struct acpi_device *device)
                {"PNP0C0E", 0},
                {"", 0},
        };
+       struct acpi_device_wakeup *wakeup = &device->wakeup;
        acpi_status status;
        acpi_event_status event_status;
 
-       device->wakeup.flags.notifier_present = 0;
+       wakeup->flags.notifier_present = 0;
 
        /* Power button, Lid switch always enable wakeup */
        if (!acpi_match_device_ids(device, button_device_ids)) {
-               device->wakeup.flags.run_wake = 1;
+               wakeup->flags.run_wake = 1;
                if (!acpi_match_device_ids(device, &button_device_ids[1])) {
                        /* Do not use Lid/sleep button for S5 wakeup */
-                       if (device->wakeup.sleep_state == ACPI_STATE_S5)
-                               device->wakeup.sleep_state = ACPI_STATE_S4;
+                       if (wakeup->sleep_state == ACPI_STATE_S5)
+                               wakeup->sleep_state = ACPI_STATE_S4;
                }
+               acpi_mark_gpe_for_wake(wakeup->gpe_device, wakeup->gpe_number);
                device_set_wakeup_capable(&device->dev, true);
                return;
        }
 
-       status = acpi_get_gpe_status(device->wakeup.gpe_device,
-                                       device->wakeup.gpe_number,
-                                               &event_status);
-       if (status == AE_OK)
-               device->wakeup.flags.run_wake =
-                               !!(event_status & ACPI_EVENT_FLAG_HANDLE);
+       acpi_setup_gpe_for_wake(device->handle, wakeup->gpe_device,
+                               wakeup->gpe_number);
+       status = acpi_get_gpe_status(wakeup->gpe_device, wakeup->gpe_number,
+                                    &event_status);
+       if (ACPI_FAILURE(status))
+               return;
+
+       wakeup->flags.run_wake = !!(event_status & ACPI_EVENT_FLAG_HANDLE);
 }
 
 static void acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
@@ -1480,7 +1483,7 @@ static void acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
 
        device->wakeup.flags.valid = 1;
        device->wakeup.prepare_count = 0;
-       acpi_bus_set_run_wake_flags(device);
+       acpi_wakeup_gpe_init(device);
        /* Call _PSW/_DSW object to disable its ability to wake the sleeping
         * system for the ACPI device with the _PRW object.
         * The _PSW object is deprecated in ACPI 3.0 and is replaced by _DSW.
index b3e3cc73ba796edf49d856c203e441c5ab818420..54da4a3fe65e65d4b6334d93b82244f95c245b1f 100644 (file)
@@ -322,6 +322,11 @@ static struct dmi_system_id acpisleep_dmi_table[] __initdata = {
 
 static void acpi_sleep_dmi_check(void)
 {
+       int year;
+
+       if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year >= 2012)
+               acpi_nvs_nosave_s3();
+
        dmi_check_system(acpisleep_dmi_table);
 }
 
index 071c1dfb93f3b5a7480133f0bdead2e9009ccb4f..18c0e6920eb41fdce3374c48d8946ed12f89fc76 100644 (file)
@@ -68,7 +68,7 @@ MODULE_AUTHOR("Bruno Ducrot");
 MODULE_DESCRIPTION("ACPI Video Driver");
 MODULE_LICENSE("GPL");
 
-static bool brightness_switch_enabled;
+static bool brightness_switch_enabled = true;
 module_param(brightness_switch_enabled, bool, 0644);
 
 /*
@@ -204,6 +204,8 @@ struct acpi_video_device {
        struct acpi_video_device_flags flags;
        struct acpi_video_device_cap cap;
        struct list_head entry;
+       struct delayed_work switch_brightness_work;
+       int switch_brightness_event;
        struct acpi_video_bus *video;
        struct acpi_device *dev;
        struct acpi_video_device_brightness *brightness;
@@ -230,8 +232,7 @@ static int acpi_video_device_lcd_get_level_current(
                        unsigned long long *level, bool raw);
 static int acpi_video_get_next_level(struct acpi_video_device *device,
                                     u32 level_current, u32 event);
-static int acpi_video_switch_brightness(struct acpi_video_device *device,
-                                        int event);
+static void acpi_video_switch_brightness(struct work_struct *work);
 
 static bool acpi_video_use_native_backlight(void)
 {
@@ -275,6 +276,7 @@ static int acpi_video_set_brightness(struct backlight_device *bd)
        int request_level = bd->props.brightness + 2;
        struct acpi_video_device *vd = bl_get_data(bd);
 
+       cancel_delayed_work(&vd->switch_brightness_work);
        return acpi_video_device_lcd_set_level(vd,
                                vd->brightness->levels[request_level]);
 }
@@ -459,6 +461,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
                DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7720"),
                },
        },
+       {
+        .callback = video_set_use_native_backlight,
+        .ident = "ThinkPad X230",
+        .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+               DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X230"),
+               },
+       },
        {
         .callback = video_set_use_native_backlight,
         .ident = "ThinkPad T430 and T430s",
@@ -469,10 +479,42 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
        },
        {
         .callback = video_set_use_native_backlight,
-        .ident = "ThinkPad X230",
+        .ident = "ThinkPad T430",
         .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-               DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X230"),
+               DMI_MATCH(DMI_PRODUCT_VERSION, "2349D15"),
+               },
+       },
+       {
+        .callback = video_set_use_native_backlight,
+        .ident = "ThinkPad T431s",
+        .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+               DMI_MATCH(DMI_PRODUCT_VERSION, "20AACTO1WW"),
+               },
+       },
+       {
+        .callback = video_set_use_native_backlight,
+        .ident = "ThinkPad Edge E530",
+        .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+               DMI_MATCH(DMI_PRODUCT_VERSION, "3259A2G"),
+               },
+       },
+       {
+        .callback = video_set_use_native_backlight,
+        .ident = "ThinkPad Edge E530",
+        .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+               DMI_MATCH(DMI_PRODUCT_VERSION, "3259CTO"),
+               },
+       },
+       {
+        .callback = video_set_use_native_backlight,
+        .ident = "ThinkPad Edge E530",
+        .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+               DMI_MATCH(DMI_PRODUCT_VERSION, "3259HJG"),
                },
        },
        {
@@ -571,6 +613,30 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
                DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate B113"),
                },
        },
+       {
+        .callback = video_set_use_native_backlight,
+        .ident = "Acer Aspire V5-572G",
+        .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "Acer Aspire"),
+               DMI_MATCH(DMI_PRODUCT_VERSION, "V5-572G/Dazzle_CX"),
+               },
+       },
+       {
+        .callback = video_set_use_native_backlight,
+        .ident = "Acer Aspire V5-573G",
+        .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "Acer Aspire"),
+               DMI_MATCH(DMI_PRODUCT_VERSION, "V5-573G/Dazzle_HW"),
+               },
+       },
+       {
+        .callback = video_set_use_native_backlight,
+        .ident = "ASUS Zenbook Prime UX31A",
+        .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+               DMI_MATCH(DMI_PRODUCT_NAME, "UX31A"),
+               },
+       },
        {
        .callback = video_set_use_native_backlight,
        .ident = "HP ProBook 4340s",
@@ -581,6 +647,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
        },
        {
        .callback = video_set_use_native_backlight,
+       .ident = "HP ProBook 4540s",
+       .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+               DMI_MATCH(DMI_PRODUCT_VERSION, "HP ProBook 4540s"),
+               },
+       },
+       {
+       .callback = video_set_use_native_backlight,
        .ident = "HP ProBook 2013 models",
        .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
@@ -1180,6 +1254,8 @@ acpi_video_bus_get_one_device(struct acpi_device *device,
        data->device_id = device_id;
        data->video = video;
        data->dev = device;
+       INIT_DELAYED_WORK(&data->switch_brightness_work,
+                         acpi_video_switch_brightness);
 
        attribute = acpi_video_get_device_attr(video, device_id);
 
@@ -1402,15 +1478,18 @@ acpi_video_get_next_level(struct acpi_video_device *device,
        }
 }
 
-static int
-acpi_video_switch_brightness(struct acpi_video_device *device, int event)
+static void
+acpi_video_switch_brightness(struct work_struct *work)
 {
+       struct acpi_video_device *device = container_of(to_delayed_work(work),
+                            struct acpi_video_device, switch_brightness_work);
        unsigned long long level_current, level_next;
+       int event = device->switch_brightness_event;
        int result = -EINVAL;
 
        /* no warning message if acpi_backlight=vendor or a quirk is used */
        if (!acpi_video_verify_backlight_support())
-               return 0;
+               return;
 
        if (!device->brightness)
                goto out;
@@ -1432,8 +1511,6 @@ acpi_video_switch_brightness(struct acpi_video_device *device, int event)
 out:
        if (result)
                printk(KERN_ERR PREFIX "Failed to switch the brightness\n");
-
-       return result;
 }
 
 int acpi_video_get_edid(struct acpi_device *device, int type, int device_id,
@@ -1601,6 +1678,16 @@ static void acpi_video_bus_notify(struct acpi_device *device, u32 event)
        return;
 }
 
+static void brightness_switch_event(struct acpi_video_device *video_device,
+                                   u32 event)
+{
+       if (!brightness_switch_enabled)
+               return;
+
+       video_device->switch_brightness_event = event;
+       schedule_delayed_work(&video_device->switch_brightness_work, HZ / 10);
+}
+
 static void acpi_video_device_notify(acpi_handle handle, u32 event, void *data)
 {
        struct acpi_video_device *video_device = data;
@@ -1618,28 +1705,23 @@ static void acpi_video_device_notify(acpi_handle handle, u32 event, void *data)
 
        switch (event) {
        case ACPI_VIDEO_NOTIFY_CYCLE_BRIGHTNESS:        /* Cycle brightness */
-               if (brightness_switch_enabled)
-                       acpi_video_switch_brightness(video_device, event);
+               brightness_switch_event(video_device, event);
                keycode = KEY_BRIGHTNESS_CYCLE;
                break;
        case ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS:  /* Increase brightness */
-               if (brightness_switch_enabled)
-                       acpi_video_switch_brightness(video_device, event);
+               brightness_switch_event(video_device, event);
                keycode = KEY_BRIGHTNESSUP;
                break;
        case ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS:  /* Decrease brightness */
-               if (brightness_switch_enabled)
-                       acpi_video_switch_brightness(video_device, event);
+               brightness_switch_event(video_device, event);
                keycode = KEY_BRIGHTNESSDOWN;
                break;
        case ACPI_VIDEO_NOTIFY_ZERO_BRIGHTNESS: /* zero brightness */
-               if (brightness_switch_enabled)
-                       acpi_video_switch_brightness(video_device, event);
+               brightness_switch_event(video_device, event);
                keycode = KEY_BRIGHTNESS_ZERO;
                break;
        case ACPI_VIDEO_NOTIFY_DISPLAY_OFF:     /* display device off */
-               if (brightness_switch_enabled)
-                       acpi_video_switch_brightness(video_device, event);
+               brightness_switch_event(video_device, event);
                keycode = KEY_DISPLAY_OFF;
                break;
        default:
index dae5607e11153a7de89dc5cbec9ba85fda080ba9..4cd52a4541a96c44fe367bb274484f282287ab20 100644 (file)
@@ -456,6 +456,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 
        /* Promise */
        { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci },   /* PDC42819 */
+       { PCI_VDEVICE(PROMISE, 0x3781), board_ahci },   /* FastTrak TX8660 ahci-mode */
 
        /* Asmedia */
        { PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci },   /* ASM1060 */
index 18d97d5c7d90f216d76abd9033346a734ae8d9ed..677c0c1b03bd658322cad2faf5becd86ce1db3cd 100644 (file)
@@ -4787,6 +4787,10 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
  *     ata_qc_new - Request an available ATA command, for queueing
  *     @ap: target port
  *
+ *     Some ATA host controllers may implement a queue depth which is less
+ *     than ATA_MAX_QUEUE. So we shouldn't allocate a tag which is beyond
+ *     the hardware limitation.
+ *
  *     LOCKING:
  *     None.
  */
@@ -4794,14 +4798,15 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
 {
        struct ata_queued_cmd *qc = NULL;
+       unsigned int max_queue = ap->host->n_tags;
        unsigned int i, tag;
 
        /* no command while frozen */
        if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
                return NULL;
 
-       for (i = 0; i < ATA_MAX_QUEUE; i++) {
-               tag = (i + ap->last_tag + 1) % ATA_MAX_QUEUE;
+       for (i = 0, tag = ap->last_tag + 1; i < max_queue; i++, tag++) {
+               tag = tag < max_queue ? tag : 0;
 
                /* the last tag is reserved for internal command. */
                if (tag == ATA_TAG_INTERNAL)
@@ -6088,6 +6093,7 @@ void ata_host_init(struct ata_host *host, struct device *dev,
 {
        spin_lock_init(&host->lock);
        mutex_init(&host->eh_mutex);
+       host->n_tags = ATA_MAX_QUEUE - 1;
        host->dev = dev;
        host->ops = ops;
 }
@@ -6169,6 +6175,8 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
 {
        int i, rc;
 
+       host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE - 1);
+
        /* host must have been started */
        if (!(host->flags & ATA_HOST_STARTED)) {
                dev_err(host->dev, "BUG: trying to register unstarted host\n");
index 6760fc4e85b8c809e39655fd92adc83871e75fce..dad83df555c49d94534a544483e20aa0a9767635 100644 (file)
@@ -1811,7 +1811,7 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
        case ATA_DEV_ATA:
                if (err & ATA_ICRC)
                        qc->err_mask |= AC_ERR_ATA_BUS;
-               if (err & ATA_UNC)
+               if (err & (ATA_UNC | ATA_AMNF))
                        qc->err_mask |= AC_ERR_MEDIA;
                if (err & ATA_IDNF)
                        qc->err_mask |= AC_ERR_INVALID;
@@ -2556,11 +2556,12 @@ static void ata_eh_link_report(struct ata_link *link)
                }
 
                if (cmd->command != ATA_CMD_PACKET &&
-                   (res->feature & (ATA_ICRC | ATA_UNC | ATA_IDNF |
-                                    ATA_ABORTED)))
-                       ata_dev_err(qc->dev, "error: { %s%s%s%s}\n",
+                   (res->feature & (ATA_ICRC | ATA_UNC | ATA_AMNF |
+                                    ATA_IDNF | ATA_ABORTED)))
+                       ata_dev_err(qc->dev, "error: { %s%s%s%s%s}\n",
                          res->feature & ATA_ICRC ? "ICRC " : "",
                          res->feature & ATA_UNC ? "UNC " : "",
+                         res->feature & ATA_AMNF ? "AMNF " : "",
                          res->feature & ATA_IDNF ? "IDNF " : "",
                          res->feature & ATA_ABORTED ? "ABRT " : "");
 #endif
index 6ad5c072ce348913d8c511574db503a37e798c9c..4d37c5415fc7fec23ba0120a917d706362709770 100644 (file)
@@ -915,7 +915,7 @@ static int ep93xx_pata_probe(struct platform_device *pdev)
        struct ep93xx_pata_data *drv_data;
        struct ata_host *host;
        struct ata_port *ap;
-       unsigned int irq;
+       int irq;
        struct resource *mem_res;
        void __iomem *ide_base;
        int err;
index 9e9227e1762d495b80ef48324ea6ec7c44bf1e27..eee48c49f5def2b6f556454f14228b001d01ee27 100644 (file)
@@ -89,8 +89,13 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)
        return dev->archdata.irqs[num];
 #else
        struct resource *r;
-       if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node)
-               return of_irq_get(dev->dev.of_node, num);
+       if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
+               int ret;
+
+               ret = of_irq_get(dev->dev.of_node, num);
+               if (ret >= 0 || ret == -EPROBE_DEFER)
+                       return ret;
+       }
 
        r = platform_get_resource(dev, IORESOURCE_IRQ, num);
 
@@ -133,8 +138,13 @@ int platform_get_irq_byname(struct platform_device *dev, const char *name)
 {
        struct resource *r;
 
-       if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node)
-               return of_irq_get_byname(dev->dev.of_node, name);
+       if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
+               int ret;
+
+               ret = of_irq_get_byname(dev->dev.of_node, name);
+               if (ret >= 0 || ret == -EPROBE_DEFER)
+                       return ret;
+       }
 
        r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name);
        return r ? r->start : -ENXIO;
index 1b35c45c92b75d7f01fd3fcda7a861ae099b4c7e..3f2e1673808053a4de70077735a67723f9955b64 100644 (file)
@@ -544,6 +544,12 @@ void conn_try_outdate_peer_async(struct drbd_connection *connection)
        struct task_struct *opa;
 
        kref_get(&connection->kref);
+       /* We may just have force_sig()'ed this thread
+        * to get it out of some blocking network function.
+        * Clear signals; otherwise kthread_run(), which internally uses
+        * wait_on_completion_killable(), will mistake our pending signal
+        * for a new fatal signal and fail. */
+       flush_signals(current);
        opa = kthread_run(_try_outdate_peer_async, connection, "drbd_async_h");
        if (IS_ERR(opa)) {
                drbd_err(connection, "out of mem, failed to invoke fence-peer helper\n");
index 089e72cd37bea051bf9f9758644fd8b79ea038cc..36e54be402df30b68c579e09c588193f5f015e00 100644 (file)
@@ -622,11 +622,18 @@ static void zram_reset_device(struct zram *zram, bool reset_capacity)
        memset(&zram->stats, 0, sizeof(zram->stats));
 
        zram->disksize = 0;
-       if (reset_capacity) {
+       if (reset_capacity)
                set_capacity(zram->disk, 0);
-               revalidate_disk(zram->disk);
-       }
+
        up_write(&zram->init_lock);
+
+       /*
+        * Revalidate disk out of the init_lock to avoid lockdep splat.
+        * It's okay because disk's capacity is protected by init_lock
+        * so that revalidate_disk always sees up-to-date capacity.
+        */
+       if (reset_capacity)
+               revalidate_disk(zram->disk);
 }
 
 static ssize_t disksize_store(struct device *dev,
@@ -666,8 +673,15 @@ static ssize_t disksize_store(struct device *dev,
        zram->comp = comp;
        zram->disksize = disksize;
        set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
-       revalidate_disk(zram->disk);
        up_write(&zram->init_lock);
+
+       /*
+        * Revalidate disk out of the init_lock to avoid lockdep splat.
+        * It's okay because disk's capacity is protected by init_lock
+        * so that revalidate_disk always sees up-to-date capacity.
+        */
+       revalidate_disk(zram->disk);
+
        return len;
 
 out_destroy_comp:
index f98380648cb3513fe47ac19213ce0b105a0d1873..f50dffc0374fb4ca9222d75683523c73a9dfce43 100644 (file)
@@ -90,7 +90,6 @@ static const struct usb_device_id ath3k_table[] = {
        { USB_DEVICE(0x0b05, 0x17d0) },
        { USB_DEVICE(0x0CF3, 0x0036) },
        { USB_DEVICE(0x0CF3, 0x3004) },
-       { USB_DEVICE(0x0CF3, 0x3005) },
        { USB_DEVICE(0x0CF3, 0x3008) },
        { USB_DEVICE(0x0CF3, 0x311D) },
        { USB_DEVICE(0x0CF3, 0x311E) },
@@ -140,7 +139,6 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
        { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0CF3, 0x0036), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
-       { USB_DEVICE(0x0cf3, 0x3005), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0x311E), .driver_info = BTUSB_ATH3012 },
index a1c80b0c7663d25baf2224bae0513b3bb29c6543..6250fc2fb93a7257697fa2efe34acfae204dfe7f 100644 (file)
@@ -162,7 +162,6 @@ static const struct usb_device_id blacklist_table[] = {
        { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
-       { USB_DEVICE(0x0cf3, 0x3005), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0x311e), .driver_info = BTUSB_ATH3012 },
index 04680ead9275c20566aeb8268a06e9393fc3107a..fede8ca7147c8bbc778f1a25ec15501ed5f15f99 100644 (file)
@@ -406,6 +406,7 @@ static int h5_rx_3wire_hdr(struct hci_uart *hu, unsigned char c)
            H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT) {
                BT_ERR("Non-link packet received in non-active state");
                h5_reset_rx(h5);
+               return 0;
        }
 
        h5->rx_func = h5_rx_payload;
index 334601cc81cf57ce92e83664ec192a01a5e353c4..c4419ea1ab078485f629ea6853b854dd6621aeb3 100644 (file)
@@ -55,16 +55,41 @@ static DEFINE_MUTEX(rng_mutex);
 static int data_avail;
 static u8 *rng_buffer;
 
+static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
+                              int wait);
+
 static size_t rng_buffer_size(void)
 {
        return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES;
 }
 
+static void add_early_randomness(struct hwrng *rng)
+{
+       unsigned char bytes[16];
+       int bytes_read;
+
+       /*
+        * Currently only virtio-rng cannot return data during device
+        * probe, and that's handled in virtio-rng.c itself.  If there
+        * are more such devices, this call to rng_get_data can be
+        * made conditional here instead of doing it per-device.
+        */
+       bytes_read = rng_get_data(rng, bytes, sizeof(bytes), 1);
+       if (bytes_read > 0)
+               add_device_randomness(bytes, bytes_read);
+}
+
 static inline int hwrng_init(struct hwrng *rng)
 {
-       if (!rng->init)
-               return 0;
-       return rng->init(rng);
+       if (rng->init) {
+               int ret;
+
+               ret =  rng->init(rng);
+               if (ret)
+                       return ret;
+       }
+       add_early_randomness(rng);
+       return 0;
 }
 
 static inline void hwrng_cleanup(struct hwrng *rng)
@@ -304,8 +329,6 @@ int hwrng_register(struct hwrng *rng)
 {
        int err = -EINVAL;
        struct hwrng *old_rng, *tmp;
-       unsigned char bytes[16];
-       int bytes_read;
 
        if (rng->name == NULL ||
            (rng->data_read == NULL && rng->read == NULL))
@@ -347,9 +370,17 @@ int hwrng_register(struct hwrng *rng)
        INIT_LIST_HEAD(&rng->list);
        list_add_tail(&rng->list, &rng_list);
 
-       bytes_read = rng_get_data(rng, bytes, sizeof(bytes), 1);
-       if (bytes_read > 0)
-               add_device_randomness(bytes, bytes_read);
+       if (old_rng && !rng->init) {
+               /*
+                * Use a new device's input to add some randomness to
+                * the system.  If this rng device isn't going to be
+                * used right away, its init function hasn't been
+                * called yet; so only use the randomness from devices
+                * that don't need an init callback.
+                */
+               add_early_randomness(rng);
+       }
+
 out_unlock:
        mutex_unlock(&rng_mutex);
 out:
index f3e71501de5409cb9947b851d2f689ab76545391..e9b15bc18b4d120ffcfb7441cee7cdeb2fca692e 100644 (file)
@@ -38,6 +38,8 @@ struct virtrng_info {
        int index;
 };
 
+static bool probe_done;
+
 static void random_recv_done(struct virtqueue *vq)
 {
        struct virtrng_info *vi = vq->vdev->priv;
@@ -67,6 +69,13 @@ static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait)
        int ret;
        struct virtrng_info *vi = (struct virtrng_info *)rng->priv;
 
+       /*
+        * Don't ask host for data until we're set up.  This call can
+        * happen during hwrng_register(), after commit d9e7972619.
+        */
+       if (unlikely(!probe_done))
+               return 0;
+
        if (!vi->busy) {
                vi->busy = true;
                init_completion(&vi->have_data);
@@ -137,6 +146,7 @@ static int probe_common(struct virtio_device *vdev)
                return err;
        }
 
+       probe_done = true;
        return 0;
 }
 
index 0a7ac0a7b2520a852be7e61d32702dab391fced0..71529e196b8475a7a4123ab5257c039b1e2e5af4 100644 (file)
@@ -641,7 +641,7 @@ retry:
                } while (unlikely(entropy_count < pool_size-2 && pnfrac));
        }
 
-       if (entropy_count < 0) {
+       if (unlikely(entropy_count < 0)) {
                pr_warn("random: negative entropy/overflow: pool %s count %d\n",
                        r->name, entropy_count);
                WARN_ON(1);
@@ -981,7 +981,7 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
                      int reserved)
 {
        int entropy_count, orig;
-       size_t ibytes;
+       size_t ibytes, nfrac;
 
        BUG_ON(r->entropy_count > r->poolinfo->poolfracbits);
 
@@ -999,7 +999,17 @@ retry:
        }
        if (ibytes < min)
                ibytes = 0;
-       if ((entropy_count -= ibytes << (ENTROPY_SHIFT + 3)) < 0)
+
+       if (unlikely(entropy_count < 0)) {
+               pr_warn("random: negative entropy count: pool %s count %d\n",
+                       r->name, entropy_count);
+               WARN_ON(1);
+               entropy_count = 0;
+       }
+       nfrac = ibytes << (ENTROPY_SHIFT + 3);
+       if ((size_t) entropy_count > nfrac)
+               entropy_count -= nfrac;
+       else
                entropy_count = 0;
 
        if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
@@ -1376,6 +1386,7 @@ urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
                            "with %d bits of entropy available\n",
                            current->comm, nonblocking_pool.entropy_total);
 
+       nbytes = min_t(size_t, nbytes, INT_MAX >> (ENTROPY_SHIFT + 3));
        ret = extract_entropy_user(&nonblocking_pool, buf, nbytes);
 
        trace_urandom_read(8 * nbytes, ENTROPY_BITS(&nonblocking_pool),
index ebac671150098b179e091aba0744a7757d3b1b2e..7364a538e0562a82b855c95305f983d8a63eb2be 100644 (file)
@@ -104,6 +104,7 @@ config ARM_IMX6Q_CPUFREQ
        tristate "Freescale i.MX6 cpufreq support"
        depends on ARCH_MXC
        depends on REGULATOR_ANATOP
+       select PM_OPP
        help
          This adds cpufreq driver support for Freescale i.MX6 series SoCs.
 
@@ -118,7 +119,7 @@ config ARM_INTEGRATOR
          If in doubt, say Y.
 
 config ARM_KIRKWOOD_CPUFREQ
-       def_bool MACH_KIRKWOOD
+       def_bool ARCH_KIRKWOOD || MACH_KIRKWOOD
        help
          This adds the CPUFreq driver for Marvell Kirkwood
          SoCs.
index ee1ae303a07c45176f562ce0662fe2ad1051640a..86beda9f950b7a8b3620b458f8d42b231fdde624 100644 (file)
@@ -152,11 +152,8 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
                goto out_put_reg;
        }
 
-       ret = of_init_opp_table(cpu_dev);
-       if (ret) {
-               pr_err("failed to init OPP table: %d\n", ret);
-               goto out_put_clk;
-       }
+       /* OPPs might be populated at runtime, don't check for error here */
+       of_init_opp_table(cpu_dev);
 
        ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
        if (ret) {
index 62259d27f03e6a012bb47f349cc681b9aa9f7e43..6f024852c6fbdecc29845b199ded752e0afce468 100644 (file)
@@ -1153,10 +1153,12 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
         * the creation of a brand new one. So we need to perform this update
         * by invoking update_policy_cpu().
         */
-       if (recover_policy && cpu != policy->cpu)
+       if (recover_policy && cpu != policy->cpu) {
                update_policy_cpu(policy, cpu);
-       else
+               WARN_ON(kobject_move(&policy->kobj, &dev->kobj));
+       } else {
                policy->cpu = cpu;
+       }
 
        cpumask_copy(policy->cpus, cpumask_of(cpu));
 
index 546376719d8f317407444419204344c94cd7a853..b5befc2111721d841a4e5801bb0f89884e92ceac 100644 (file)
@@ -349,7 +349,7 @@ static int __init sa1110_clk_init(void)
                        name = "K4S641632D";
                if (machine_is_h3100())
                        name = "KM416S4030CT";
-               if (machine_is_jornada720())
+               if (machine_is_jornada720() || machine_is_h3600())
                        name = "K4S281632B-1H";
                if (machine_is_nanoengine())
                        name = "MT48LC8M16A2TG-75";
index 4199849e37585181eace8176b55d4e81cbfd06db..145974f9662b63e603e18ac40a013f1f9a8ba2ad 100644 (file)
@@ -1,4 +1,5 @@
 menu "IEEE 1394 (FireWire) support"
+       depends on HAS_DMA
        depends on PCI || COMPILE_TEST
        # firewire-core does not depend on PCI but is
        # not useful without PCI controller driver
index 57985410f12f7ea9770818457e2d9550d815a3dd..a66a3217f1d92f939fa4f6f2b1ee7a7ee976488f 100644 (file)
@@ -336,10 +336,10 @@ static const struct {
                QUIRK_CYCLE_TIMER | QUIRK_IR_WAKE},
 
        {PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, 0,
-               QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},
+               QUIRK_CYCLE_TIMER /* FIXME: necessary? */ | QUIRK_NO_MSI},
 
        {PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, PCI_ANY_ID,
-               0},
+               QUIRK_NO_MSI},
 
        {PCI_VENDOR_ID_VIA, PCI_ANY_ID, PCI_ANY_ID,
                QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},
index 1491dd4f08f9f1526718835a3e4ef63c86d1e659..65f2f3fdde2417e5e923dba733aba14b5dadf3dc 100644 (file)
@@ -262,7 +262,7 @@ static const char *cper_pcie_port_type_strs[] = {
 };
 
 static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie,
-                           const struct acpi_generic_data *gdata)
+                           const struct acpi_hest_generic_data *gdata)
 {
        if (pcie->validation_bits & CPER_PCIE_VALID_PORT_TYPE)
                printk("%s""port_type: %d, %s\n", pfx, pcie->port_type,
@@ -298,7 +298,7 @@ static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie,
 }
 
 static void cper_estatus_print_section(
-       const char *pfx, const struct acpi_generic_data *gdata, int sec_no)
+       const char *pfx, const struct acpi_hest_generic_data *gdata, int sec_no)
 {
        uuid_le *sec_type = (uuid_le *)gdata->section_type;
        __u16 severity;
@@ -344,9 +344,9 @@ err_section_too_small:
 }
 
 void cper_estatus_print(const char *pfx,
-                       const struct acpi_generic_status *estatus)
+                       const struct acpi_hest_generic_status *estatus)
 {
-       struct acpi_generic_data *gdata;
+       struct acpi_hest_generic_data *gdata;
        unsigned int data_len, gedata_len;
        int sec_no = 0;
        char newpfx[64];
@@ -359,7 +359,7 @@ void cper_estatus_print(const char *pfx,
                       "and requires no further action");
        printk("%s""event severity: %s\n", pfx, cper_severity_str(severity));
        data_len = estatus->data_length;
-       gdata = (struct acpi_generic_data *)(estatus + 1);
+       gdata = (struct acpi_hest_generic_data *)(estatus + 1);
        snprintf(newpfx, sizeof(newpfx), "%s%s", pfx, INDENT_SP);
        while (data_len >= sizeof(*gdata)) {
                gedata_len = gdata->error_data_length;
@@ -371,10 +371,10 @@ void cper_estatus_print(const char *pfx,
 }
 EXPORT_SYMBOL_GPL(cper_estatus_print);
 
-int cper_estatus_check_header(const struct acpi_generic_status *estatus)
+int cper_estatus_check_header(const struct acpi_hest_generic_status *estatus)
 {
        if (estatus->data_length &&
-           estatus->data_length < sizeof(struct acpi_generic_data))
+           estatus->data_length < sizeof(struct acpi_hest_generic_data))
                return -EINVAL;
        if (estatus->raw_data_length &&
            estatus->raw_data_offset < sizeof(*estatus) + estatus->data_length)
@@ -384,9 +384,9 @@ int cper_estatus_check_header(const struct acpi_generic_status *estatus)
 }
 EXPORT_SYMBOL_GPL(cper_estatus_check_header);
 
-int cper_estatus_check(const struct acpi_generic_status *estatus)
+int cper_estatus_check(const struct acpi_hest_generic_status *estatus)
 {
-       struct acpi_generic_data *gdata;
+       struct acpi_hest_generic_data *gdata;
        unsigned int data_len, gedata_len;
        int rc;
 
@@ -394,7 +394,7 @@ int cper_estatus_check(const struct acpi_generic_status *estatus)
        if (rc)
                return rc;
        data_len = estatus->data_length;
-       gdata = (struct acpi_generic_data *)(estatus + 1);
+       gdata = (struct acpi_hest_generic_data *)(estatus + 1);
        while (data_len >= sizeof(*gdata)) {
                gedata_len = gdata->error_data_length;
                if (gedata_len > data_len - sizeof(*gdata))
index eff1a2f22f09bb4b147488aeb8ae0b11dbb6f6d8..dc79346689e6040dfe127b609d99655da079d26a 100644 (file)
@@ -346,6 +346,7 @@ static __initdata struct {
 
 struct param_info {
        int verbose;
+       int found;
        void *params;
 };
 
@@ -362,16 +363,12 @@ static int __init fdt_find_uefi_params(unsigned long node, const char *uname,
            (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
                return 0;
 
-       pr_info("Getting parameters from FDT:\n");
-
        for (i = 0; i < ARRAY_SIZE(dt_params); i++) {
                prop = of_get_flat_dt_prop(node, dt_params[i].propname, &len);
-               if (!prop) {
-                       pr_err("Can't find %s in device tree!\n",
-                              dt_params[i].name);
+               if (!prop)
                        return 0;
-               }
                dest = info->params + dt_params[i].offset;
+               info->found++;
 
                val = of_read_number(prop, len / sizeof(u32));
 
@@ -390,10 +387,21 @@ static int __init fdt_find_uefi_params(unsigned long node, const char *uname,
 int __init efi_get_fdt_params(struct efi_fdt_params *params, int verbose)
 {
        struct param_info info;
+       int ret;
+
+       pr_info("Getting EFI parameters from FDT:\n");
 
        info.verbose = verbose;
+       info.found = 0;
        info.params = params;
 
-       return of_scan_flat_dt(fdt_find_uefi_params, &info);
+       ret = of_scan_flat_dt(fdt_find_uefi_params, &info);
+       if (!info.found)
+               pr_info("UEFI not found.\n");
+       else if (!ret)
+               pr_err("Can't find '%s' in device tree!\n",
+                      dt_params[info.found].name);
+
+       return ret;
 }
 #endif /* CONFIG_EFI_PARAMS_FROM_FDT */
index 82d774161cc9d65783954451aaee771c24e2e2e6..507a3df46a5dabba812a647077a752a56fd7116c 100644 (file)
@@ -23,16 +23,6 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
        u32 fdt_val32;
        u64 fdt_val64;
 
-       /*
-        * Copy definition of linux_banner here.  Since this code is
-        * built as part of the decompressor for ARM v7, pulling
-        * in version.c where linux_banner is defined for the
-        * kernel brings other kernel dependencies with it.
-        */
-       const char linux_banner[] =
-           "Linux version " UTS_RELEASE " (" LINUX_COMPILE_BY "@"
-           LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION "\n";
-
        /* Do some checks on provided FDT, if it exists*/
        if (orig_fdt) {
                if (fdt_check_header(orig_fdt)) {
index fe7c0e211f9a85becb51bf64a808033c50203cee..57adbc90fdad8f4dc821fea920f599031e602dad 100644 (file)
@@ -900,8 +900,6 @@ static int mcp23s08_probe(struct spi_device *spi)
                        if (spi_present_mask & (1 << addr))
                                chips++;
                }
-               if (!chips)
-                       return -ENODEV;
        } else {
                type = spi_get_device_id(spi)->driver_data;
                pdata = dev_get_platdata(&spi->dev);
@@ -940,10 +938,6 @@ static int mcp23s08_probe(struct spi_device *spi)
                if (!(spi_present_mask & (1 << addr)))
                        continue;
                chips--;
-               if (chips < 0) {
-                       dev_err(&spi->dev, "FATAL: invalid negative chip id\n");
-                       goto fail;
-               }
                data->mcp[addr] = &data->chip[chips];
                status = mcp23s08_probe_one(data->mcp[addr], &spi->dev, spi,
                                            0x40 | (addr << 1), type, base,
index 0c9f803fc1acdb3fe9e6798ce506e1ec21a41680..b6ae89ea88119b2b1d35b522f64e07470d20a8dc 100644 (file)
@@ -284,6 +284,7 @@ static int gpio_rcar_irq_domain_map(struct irq_domain *h, unsigned int irq,
 
 static struct irq_domain_ops gpio_rcar_irq_domain_ops = {
        .map    = gpio_rcar_irq_domain_map,
+       .xlate  = irq_domain_xlate_twocell,
 };
 
 struct gpio_rcar_info {
index f36126383d260166a950ad20c72b69637246c550..d893e4da5dcef9cc0c30d873f6f3d6033a27a03d 100644 (file)
@@ -1616,22 +1616,6 @@ out:
        return ret;
 }
 
-void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
-{
-       struct i915_vma *vma;
-
-       /*
-        * Only the global gtt is relevant for gtt memory mappings, so restrict
-        * list traversal to objects bound into the global address space. Note
-        * that the active list should be empty, but better safe than sorry.
-        */
-       WARN_ON(!list_empty(&dev_priv->gtt.base.active_list));
-       list_for_each_entry(vma, &dev_priv->gtt.base.active_list, mm_list)
-               i915_gem_release_mmap(vma->obj);
-       list_for_each_entry(vma, &dev_priv->gtt.base.inactive_list, mm_list)
-               i915_gem_release_mmap(vma->obj);
-}
-
 /**
  * i915_gem_release_mmap - remove physical page mappings
  * @obj: obj in question
@@ -1657,6 +1641,15 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
        obj->fault_mappable = false;
 }
 
+void
+i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
+{
+       struct drm_i915_gem_object *obj;
+
+       list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
+               i915_gem_release_mmap(obj);
+}
+
 uint32_t
 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
 {
index 3521f998a1788488b8c396860f6433ffacb81ae2..34894b57306401645a2184d48d99ed2966c8c9f1 100644 (file)
@@ -31,7 +31,7 @@
 struct i915_render_state {
        struct drm_i915_gem_object *obj;
        unsigned long ggtt_offset;
-       void *batch;
+       u32 *batch;
        u32 size;
        u32 len;
 };
@@ -80,7 +80,7 @@ free:
 
 static void render_state_free(struct i915_render_state *so)
 {
-       kunmap(so->batch);
+       kunmap(kmap_to_page(so->batch));
        i915_gem_object_ggtt_unpin(so->obj);
        drm_gem_object_unreference(&so->obj->base);
        kfree(so);
index 267f069765adedffb942d1352649502ac624a364..c05c84f3f091382729b5cfcfa867471faed06a83 100644 (file)
@@ -2845,7 +2845,7 @@ static int semaphore_passed(struct intel_engine_cs *ring)
 {
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
        struct intel_engine_cs *signaller;
-       u32 seqno, ctl;
+       u32 seqno;
 
        ring->hangcheck.deadlock++;
 
@@ -2857,15 +2857,12 @@ static int semaphore_passed(struct intel_engine_cs *ring)
        if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
                return -1;
 
-       /* cursory check for an unkickable deadlock */
-       ctl = I915_READ_CTL(signaller);
-       if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
-               return -1;
-
        if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
                return 1;
 
-       if (signaller->hangcheck.deadlock)
+       /* cursory check for an unkickable deadlock */
+       if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
+           semaphore_passed(signaller) < 0)
                return -1;
 
        return 0;
index e27e7804c0b974d85dd1d87cb2a57f7bebae78ca..f0be855ddf45c5b817edd0efb7b28eb658fe032c 100644 (file)
@@ -11673,6 +11673,9 @@ static struct intel_quirk intel_quirks[] = {
 
        /* Toshiba CB35 Chromebook (Celeron 2955U) */
        { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
+
+       /* HP Chromebook 14 (Celeron 2955U) */
+       { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
 };
 
 static void intel_init_quirks(struct drm_device *dev)
@@ -11911,6 +11914,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
                 * ...  */
                plane = crtc->plane;
                crtc->plane = !plane;
+               crtc->primary_enabled = true;
                dev_priv->display.crtc_disable(&crtc->base);
                crtc->plane = plane;
 
index 075170d1844fa653d40a4a5d0827a5026859f2b8..8a1a4fbc06ac85c5c41b58750d44219777c37d6b 100644 (file)
@@ -906,8 +906,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
                mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
                                                   bpp);
 
-               for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) {
-                       for (clock = min_clock; clock <= max_clock; clock++) {
+               for (clock = min_clock; clock <= max_clock; clock++) {
+                       for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) {
                                link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
                                link_avail = intel_dp_max_data_rate(link_clock,
                                                                    lane_count);
index 23126023aeba04e6819d9ad1fdd64f2b8d69b646..5e5a72fca5fbcf51fcf8f925fbb3f2508f508911 100644 (file)
@@ -111,6 +111,13 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
 
        pipe_config->adjusted_mode.flags |= flags;
 
+       /* gen2/3 store dither state in pfit control, needs to match */
+       if (INTEL_INFO(dev)->gen < 4) {
+               tmp = I915_READ(PFIT_CONTROL);
+
+               pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
+       }
+
        dotclock = pipe_config->port_clock;
 
        if (HAS_PCH_SPLIT(dev_priv->dev))
index 628cd8938274ca44ff1a0c1efa0e338e5a05f9d0..12b02fe1d0aed7349662932bad8c38b53d914ac3 100644 (file)
@@ -361,16 +361,16 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
                pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
                                 PFIT_FILTER_FUZZY);
 
-       /* Make sure pre-965 set dither correctly for 18bpp panels. */
-       if (INTEL_INFO(dev)->gen < 4 && pipe_config->pipe_bpp == 18)
-               pfit_control |= PANEL_8TO6_DITHER_ENABLE;
-
 out:
        if ((pfit_control & PFIT_ENABLE) == 0) {
                pfit_control = 0;
                pfit_pgm_ratios = 0;
        }
 
+       /* Make sure pre-965 set dither correctly for 18bpp panels. */
+       if (INTEL_INFO(dev)->gen < 4 && pipe_config->pipe_bpp == 18)
+               pfit_control |= PANEL_8TO6_DITHER_ENABLE;
+
        pipe_config->gmch_pfit.control = pfit_control;
        pipe_config->gmch_pfit.pgm_ratios = pfit_pgm_ratios;
        pipe_config->gmch_pfit.lvds_border_bits = border;
index cfde9eb44ad0142309970f2eac404c8c5b368c69..6212537b90c5bc85e0ebfa8d9cf008f30ab2d233 100644 (file)
@@ -192,11 +192,11 @@ alarm_timer_callback(struct nouveau_alarm *alarm)
        nouveau_therm_threshold_hyst_polling(therm, &sensor->thrs_shutdown,
                                             NOUVEAU_THERM_THRS_SHUTDOWN);
 
+       spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
+
        /* schedule the next poll in one second */
        if (therm->temp_get(therm) >= 0 && list_empty(&alarm->head))
-               ptimer->alarm(ptimer, 1000 * 1000 * 1000, alarm);
-
-       spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
+               ptimer->alarm(ptimer, 1000000000ULL, alarm);
 }
 
 void
index 34d6a85e9023655efd5c67bd6b1e67d20b34b1bc..0bf1e20c6e44cee16a4c11b18b7903713e4707a9 100644 (file)
@@ -33,6 +33,9 @@ irqreturn_t qxl_irq_handler(int irq, void *arg)
 
        pending = xchg(&qdev->ram_header->int_pending, 0);
 
+       if (!pending)
+               return IRQ_NONE;
+
        atomic_inc(&qdev->irq_received);
 
        if (pending & QXL_INTERRUPT_DISPLAY) {
index a03c73411a56ab3131871151f41dd625021e013f..30d242b25078e1f5c35d63384961db4279c7ffd4 100644 (file)
@@ -1414,8 +1414,8 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
        tmp &= ~EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN;
        WREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
 
-       /* set pageflip to happen anywhere in vblank interval */
-       WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
+       /* set pageflip to happen only at start of vblank interval (front porch) */
+       WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3);
 
        if (!atomic && fb && fb != crtc->primary->fb) {
                radeon_fb = to_radeon_framebuffer(fb);
@@ -1614,8 +1614,8 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
        tmp &= ~AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN;
        WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
 
-       /* set pageflip to happen anywhere in vblank interval */
-       WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
+       /* set pageflip to happen only at start of vblank interval (front porch) */
+       WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3);
 
        if (!atomic && fb && fb != crtc->primary->fb) {
                radeon_fb = to_radeon_framebuffer(fb);
index 2b2908440644e8908accbeabc855d5c5066c7d47..7d68203a3737f39d49594dc25dbdda64365a45b7 100644 (file)
@@ -183,7 +183,6 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
        struct backlight_properties props;
        struct radeon_backlight_privdata *pdata;
        struct radeon_encoder_atom_dig *dig;
-       u8 backlight_level;
        char bl_name[16];
 
        /* Mac laptops with multiple GPUs use the gmux driver for backlight
@@ -222,12 +221,17 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
 
        pdata->encoder = radeon_encoder;
 
-       backlight_level = radeon_atom_get_backlight_level_from_reg(rdev);
-
        dig = radeon_encoder->enc_priv;
        dig->bl_dev = bd;
 
        bd->props.brightness = radeon_atom_backlight_get_brightness(bd);
+       /* Set a reasonable default here if the level is 0; otherwise
+        * fbdev will attempt to turn the backlight on after console
+        * unblanking, and it will try to restore 0, which turns the backlight
+        * off again.
+        */
+       if (bd->props.brightness == 0)
+               bd->props.brightness = RADEON_MAX_BL_LEVEL;
        bd->props.power = FB_BLANK_UNBLANK;
        backlight_update_status(bd);
 
index 0b2471107137a90af6677ece89a7351c9b97f599..c0ea66192fe03a8fe977ac0aea977a07204530c4 100644 (file)
@@ -2291,6 +2291,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
                                gb_tile_moden = 0;
                                break;
                        }
+                       rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
                        WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
                }
        } else if (num_pipe_configs == 8) {
@@ -7376,6 +7377,7 @@ static inline u32 cik_get_ih_wptr(struct radeon_device *rdev)
                tmp = RREG32(IH_RB_CNTL);
                tmp |= IH_WPTR_OVERFLOW_CLEAR;
                WREG32(IH_RB_CNTL, tmp);
+               wptr &= ~RB_OVERFLOW;
        }
        return (wptr & rdev->ih.ptr_mask);
 }
index f7ece0ff431b3eaa6bec99cad2a08332b89c9b33..15e4f28015e1e74fcc0a3e6189feee1274fa1794 100644 (file)
@@ -2642,8 +2642,9 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
        for (i = 0; i < rdev->num_crtc; i++) {
                if (save->crtc_enabled[i]) {
                        tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]);
-                       if ((tmp & 0x3) != 0) {
-                               tmp &= ~0x3;
+                       if ((tmp & 0x7) != 3) {
+                               tmp &= ~0x7;
+                               tmp |= 0x3;
                                WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
                        }
                        tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
@@ -4755,6 +4756,7 @@ static u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
                tmp = RREG32(IH_RB_CNTL);
                tmp |= IH_WPTR_OVERFLOW_CLEAR;
                WREG32(IH_RB_CNTL, tmp);
+               wptr &= ~RB_OVERFLOW;
        }
        return (wptr & rdev->ih.ptr_mask);
 }
index 333d143fca2ccf8700096db893798741baf21b5e..23bff590fb6e8057277bf55795915dfad9c6cb79 100644 (file)
 #       define EVERGREEN_CRTC_V_BLANK                   (1 << 0)
 #define EVERGREEN_CRTC_STATUS_POSITION                  0x6e90
 #define EVERGREEN_CRTC_STATUS_HV_COUNT                  0x6ea0
-#define EVERGREEN_MASTER_UPDATE_MODE                    0x6ef8
 #define EVERGREEN_CRTC_UPDATE_LOCK                      0x6ed4
 #define EVERGREEN_MASTER_UPDATE_LOCK                    0x6ef4
 #define EVERGREEN_MASTER_UPDATE_MODE                    0x6ef8
index c66952d4b00cc5d706a13650fe2d82ef71389f1d..3c69f58e46efd94b02a440e15e8aad66f9241712 100644 (file)
@@ -3795,6 +3795,7 @@ static u32 r600_get_ih_wptr(struct radeon_device *rdev)
                tmp = RREG32(IH_RB_CNTL);
                tmp |= IH_WPTR_OVERFLOW_CLEAR;
                WREG32(IH_RB_CNTL, tmp);
+               wptr &= ~RB_OVERFLOW;
        }
        return (wptr & rdev->ih.ptr_mask);
 }
index 29d9cc04c04ecbd0c0ffe985165822a6a7cda503..60c47f8291222369f7f6070953eea7bda1a25ce9 100644 (file)
@@ -449,6 +449,7 @@ struct radeon_bo_va {
 
        /* protected by vm mutex */
        struct list_head                vm_list;
+       struct list_head                vm_status;
 
        /* constant after initialization */
        struct radeon_vm                *vm;
@@ -684,10 +685,9 @@ struct radeon_flip_work {
        struct work_struct              unpin_work;
        struct radeon_device            *rdev;
        int                             crtc_id;
-       struct drm_framebuffer          *fb;
+       uint64_t                        base;
        struct drm_pending_vblank_event *event;
        struct radeon_bo                *old_rbo;
-       struct radeon_bo                *new_rbo;
        struct radeon_fence             *fence;
 };
 
@@ -868,6 +868,9 @@ struct radeon_vm {
        struct list_head                va;
        unsigned                        id;
 
+       /* BOs freed, but not yet updated in the PT */
+       struct list_head                freed;
+
        /* contains the page directory */
        struct radeon_bo                *page_directory;
        uint64_t                        pd_gpu_addr;
@@ -876,6 +879,8 @@ struct radeon_vm {
        /* array of page tables, one for each page directory entry */
        struct radeon_vm_pt             *page_tables;
 
+       struct radeon_bo_va             *ib_bo_va;
+
        struct mutex                    mutex;
        /* last fence for cs using this vm */
        struct radeon_fence             *fence;
@@ -2833,9 +2838,10 @@ void radeon_vm_fence(struct radeon_device *rdev,
 uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr);
 int radeon_vm_update_page_directory(struct radeon_device *rdev,
                                    struct radeon_vm *vm);
+int radeon_vm_clear_freed(struct radeon_device *rdev,
+                         struct radeon_vm *vm);
 int radeon_vm_bo_update(struct radeon_device *rdev,
-                       struct radeon_vm *vm,
-                       struct radeon_bo *bo,
+                       struct radeon_bo_va *bo_va,
                        struct ttm_mem_reg *mem);
 void radeon_vm_bo_invalidate(struct radeon_device *rdev,
                             struct radeon_bo *bo);
@@ -2848,8 +2854,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
                          struct radeon_bo_va *bo_va,
                          uint64_t offset,
                          uint32_t flags);
-int radeon_vm_bo_rmv(struct radeon_device *rdev,
-                    struct radeon_bo_va *bo_va);
+void radeon_vm_bo_rmv(struct radeon_device *rdev,
+                     struct radeon_bo_va *bo_va);
 
 /* audio */
 void r600_audio_update_hdmi(struct work_struct *work);
index 71a143461478a60f5d9bc8428a0276c38b78953b..ae763f60c8a0a23f551bffb5119770d94527931a 100644 (file)
@@ -461,13 +461,23 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
                                   struct radeon_vm *vm)
 {
        struct radeon_device *rdev = p->rdev;
+       struct radeon_bo_va *bo_va;
        int i, r;
 
        r = radeon_vm_update_page_directory(rdev, vm);
        if (r)
                return r;
 
-       r = radeon_vm_bo_update(rdev, vm, rdev->ring_tmp_bo.bo,
+       r = radeon_vm_clear_freed(rdev, vm);
+       if (r)
+               return r;
+
+       if (vm->ib_bo_va == NULL) {
+               DRM_ERROR("Tmp BO not in VM!\n");
+               return -EINVAL;
+       }
+
+       r = radeon_vm_bo_update(rdev, vm->ib_bo_va,
                                &rdev->ring_tmp_bo.bo->tbo.mem);
        if (r)
                return r;
@@ -480,7 +490,13 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
                        continue;
 
                bo = p->relocs[i].robj;
-               r = radeon_vm_bo_update(rdev, vm, bo, &bo->tbo.mem);
+               bo_va = radeon_vm_bo_find(vm, bo);
+               if (bo_va == NULL) {
+                       dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
+                       return -EINVAL;
+               }
+
+               r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);
                if (r)
                        return r;
        }
index 03686fab842d3c2e0de237fa6fb027895a025a80..697add2cd4e34e89b6276659142f476ccf0c1e64 100644 (file)
@@ -1056,36 +1056,36 @@ static void radeon_check_arguments(struct radeon_device *rdev)
        if (!radeon_check_pot_argument(radeon_vm_size)) {
                dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n",
                         radeon_vm_size);
-               radeon_vm_size = 4096;
+               radeon_vm_size = 4;
        }
 
-       if (radeon_vm_size < 4) {
-               dev_warn(rdev->dev, "VM size (%d) to small, min is 4MB\n",
+       if (radeon_vm_size < 1) {
+               dev_warn(rdev->dev, "VM size (%d) to small, min is 1GB\n",
                         radeon_vm_size);
-               radeon_vm_size = 4096;
+               radeon_vm_size = 4;
        }
 
        /*
         * Max GPUVM size for Cayman, SI and CI are 40 bits.
         */
-       if (radeon_vm_size > 1024*1024) {
-               dev_warn(rdev->dev, "VM size (%d) to large, max is 1TB\n",
+       if (radeon_vm_size > 1024) {
+               dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
                         radeon_vm_size);
-               radeon_vm_size = 4096;
+               radeon_vm_size = 4;
        }
 
        /* defines number of bits in page table versus page directory,
         * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
         * page table and the remaining bits are in the page directory */
        if (radeon_vm_block_size < 9) {
-               dev_warn(rdev->dev, "VM page table size (%d) to small\n",
+               dev_warn(rdev->dev, "VM page table size (%d) too small\n",
                         radeon_vm_block_size);
                radeon_vm_block_size = 9;
        }
 
        if (radeon_vm_block_size > 24 ||
-           radeon_vm_size < (1ull << radeon_vm_block_size)) {
-               dev_warn(rdev->dev, "VM page table size (%d) to large\n",
+           (radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) {
+               dev_warn(rdev->dev, "VM page table size (%d) too large\n",
                         radeon_vm_block_size);
                radeon_vm_block_size = 9;
        }
@@ -1238,7 +1238,7 @@ int radeon_device_init(struct radeon_device *rdev,
        /* Adjust VM size here.
         * Max GPUVM size for cayman+ is 40 bits.
         */
-       rdev->vm_manager.max_pfn = radeon_vm_size << 8;
+       rdev->vm_manager.max_pfn = radeon_vm_size << 18;
 
        /* Set asic functions */
        r = radeon_asic_init(rdev);
index 13896edcf0b6507337e728269732e4c391883115..bf25061c8ac4ee37b0f1c72b003427eb551dca22 100644 (file)
@@ -366,7 +366,6 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
        spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
 
        drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id);
-       radeon_fence_unref(&work->fence);
        radeon_irq_kms_pflip_irq_put(rdev, work->crtc_id);
        queue_work(radeon_crtc->flip_queue, &work->unpin_work);
 }
@@ -386,51 +385,108 @@ static void radeon_flip_work_func(struct work_struct *__work)
        struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[work->crtc_id];
 
        struct drm_crtc *crtc = &radeon_crtc->base;
-       struct drm_framebuffer *fb = work->fb;
-
-       uint32_t tiling_flags, pitch_pixels;
-       uint64_t base;
-
        unsigned long flags;
        int r;
 
         down_read(&rdev->exclusive_lock);
-       while (work->fence) {
+       if (work->fence) {
                r = radeon_fence_wait(work->fence, false);
                if (r == -EDEADLK) {
                        up_read(&rdev->exclusive_lock);
                        r = radeon_gpu_reset(rdev);
                        down_read(&rdev->exclusive_lock);
                }
+               if (r)
+                       DRM_ERROR("failed to wait on page flip fence (%d)!\n", r);
 
-               if (r) {
-                       DRM_ERROR("failed to wait on page flip fence (%d)!\n",
-                                 r);
-                       goto cleanup;
-               } else
-                       radeon_fence_unref(&work->fence);
+               /* We continue with the page flip even if we failed to wait on
+                * the fence, otherwise the DRM core and userspace will be
+                * confused about which BO the CRTC is scanning out
+                */
+
+               radeon_fence_unref(&work->fence);
        }
 
+       /* We borrow the event spin lock for protecting flip_status */
+       spin_lock_irqsave(&crtc->dev->event_lock, flags);
+
+       /* set the proper interrupt */
+       radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id);
+
+       /* do the flip (mmio) */
+       radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base);
+
+       radeon_crtc->flip_status = RADEON_FLIP_SUBMITTED;
+       spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+       up_read(&rdev->exclusive_lock);
+}
+
+static int radeon_crtc_page_flip(struct drm_crtc *crtc,
+                                struct drm_framebuffer *fb,
+                                struct drm_pending_vblank_event *event,
+                                uint32_t page_flip_flags)
+{
+       struct drm_device *dev = crtc->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+       struct radeon_framebuffer *old_radeon_fb;
+       struct radeon_framebuffer *new_radeon_fb;
+       struct drm_gem_object *obj;
+       struct radeon_flip_work *work;
+       struct radeon_bo *new_rbo;
+       uint32_t tiling_flags, pitch_pixels;
+       uint64_t base;
+       unsigned long flags;
+       int r;
+
+       work = kzalloc(sizeof *work, GFP_KERNEL);
+       if (work == NULL)
+               return -ENOMEM;
+
+       INIT_WORK(&work->flip_work, radeon_flip_work_func);
+       INIT_WORK(&work->unpin_work, radeon_unpin_work_func);
+
+       work->rdev = rdev;
+       work->crtc_id = radeon_crtc->crtc_id;
+       work->event = event;
+
+       /* schedule unpin of the old buffer */
+       old_radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
+       obj = old_radeon_fb->obj;
+
+       /* take a reference to the old object */
+       drm_gem_object_reference(obj);
+       work->old_rbo = gem_to_radeon_bo(obj);
+
+       new_radeon_fb = to_radeon_framebuffer(fb);
+       obj = new_radeon_fb->obj;
+       new_rbo = gem_to_radeon_bo(obj);
+
+       spin_lock(&new_rbo->tbo.bdev->fence_lock);
+       if (new_rbo->tbo.sync_obj)
+               work->fence = radeon_fence_ref(new_rbo->tbo.sync_obj);
+       spin_unlock(&new_rbo->tbo.bdev->fence_lock);
+
        /* pin the new buffer */
-       DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n",
-                        work->old_rbo, work->new_rbo);
+       DRM_DEBUG_DRIVER("flip-ioctl() cur_rbo = %p, new_rbo = %p\n",
+                        work->old_rbo, new_rbo);
 
-       r = radeon_bo_reserve(work->new_rbo, false);
+       r = radeon_bo_reserve(new_rbo, false);
        if (unlikely(r != 0)) {
                DRM_ERROR("failed to reserve new rbo buffer before flip\n");
                goto cleanup;
        }
        /* Only 27 bit offset for legacy CRTC */
-       r = radeon_bo_pin_restricted(work->new_rbo, RADEON_GEM_DOMAIN_VRAM,
+       r = radeon_bo_pin_restricted(new_rbo, RADEON_GEM_DOMAIN_VRAM,
                                     ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27, &base);
        if (unlikely(r != 0)) {
-               radeon_bo_unreserve(work->new_rbo);
+               radeon_bo_unreserve(new_rbo);
                r = -EINVAL;
                DRM_ERROR("failed to pin new rbo buffer before flip\n");
                goto cleanup;
        }
-       radeon_bo_get_tiling_flags(work->new_rbo, &tiling_flags, NULL);
-       radeon_bo_unreserve(work->new_rbo);
+       radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
+       radeon_bo_unreserve(new_rbo);
 
        if (!ASIC_IS_AVIVO(rdev)) {
                /* crtc offset is from display base addr not FB location */
@@ -467,6 +523,7 @@ static void radeon_flip_work_func(struct work_struct *__work)
                }
                base &= ~7;
        }
+       work->base = base;
 
        r = drm_vblank_get(crtc->dev, radeon_crtc->crtc_id);
        if (r) {
@@ -477,100 +534,42 @@ static void radeon_flip_work_func(struct work_struct *__work)
        /* We borrow the event spin lock for protecting flip_work */
        spin_lock_irqsave(&crtc->dev->event_lock, flags);
 
-       /* set the proper interrupt */
-       radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id);
+       if (radeon_crtc->flip_status != RADEON_FLIP_NONE) {
+               DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
+               spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+               r = -EBUSY;
+               goto vblank_cleanup;
+       }
+       radeon_crtc->flip_status = RADEON_FLIP_PENDING;
+       radeon_crtc->flip_work = work;
 
-       /* do the flip (mmio) */
-       radeon_page_flip(rdev, radeon_crtc->crtc_id, base);
+       /* update crtc fb */
+       crtc->primary->fb = fb;
 
-       radeon_crtc->flip_status = RADEON_FLIP_SUBMITTED;
        spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
-       up_read(&rdev->exclusive_lock);
 
-       return;
+       queue_work(radeon_crtc->flip_queue, &work->flip_work);
+       return 0;
+
+vblank_cleanup:
+       drm_vblank_put(crtc->dev, radeon_crtc->crtc_id);
 
 pflip_cleanup:
-       if (unlikely(radeon_bo_reserve(work->new_rbo, false) != 0)) {
+       if (unlikely(radeon_bo_reserve(new_rbo, false) != 0)) {
                DRM_ERROR("failed to reserve new rbo in error path\n");
                goto cleanup;
        }
-       if (unlikely(radeon_bo_unpin(work->new_rbo) != 0)) {
+       if (unlikely(radeon_bo_unpin(new_rbo) != 0)) {
                DRM_ERROR("failed to unpin new rbo in error path\n");
        }
-       radeon_bo_unreserve(work->new_rbo);
+       radeon_bo_unreserve(new_rbo);
 
 cleanup:
        drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
        radeon_fence_unref(&work->fence);
        kfree(work);
-       up_read(&rdev->exclusive_lock);
-}
-
-static int radeon_crtc_page_flip(struct drm_crtc *crtc,
-                                struct drm_framebuffer *fb,
-                                struct drm_pending_vblank_event *event,
-                                uint32_t page_flip_flags)
-{
-       struct drm_device *dev = crtc->dev;
-       struct radeon_device *rdev = dev->dev_private;
-       struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
-       struct radeon_framebuffer *old_radeon_fb;
-       struct radeon_framebuffer *new_radeon_fb;
-       struct drm_gem_object *obj;
-       struct radeon_flip_work *work;
-       unsigned long flags;
-
-       work = kzalloc(sizeof *work, GFP_KERNEL);
-       if (work == NULL)
-               return -ENOMEM;
-
-       INIT_WORK(&work->flip_work, radeon_flip_work_func);
-       INIT_WORK(&work->unpin_work, radeon_unpin_work_func);
-
-       work->rdev = rdev;
-       work->crtc_id = radeon_crtc->crtc_id;
-       work->fb = fb;
-       work->event = event;
-
-       /* schedule unpin of the old buffer */
-       old_radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
-       obj = old_radeon_fb->obj;
-
-       /* take a reference to the old object */
-       drm_gem_object_reference(obj);
-       work->old_rbo = gem_to_radeon_bo(obj);
-
-       new_radeon_fb = to_radeon_framebuffer(fb);
-       obj = new_radeon_fb->obj;
-       work->new_rbo = gem_to_radeon_bo(obj);
-
-       spin_lock(&work->new_rbo->tbo.bdev->fence_lock);
-       if (work->new_rbo->tbo.sync_obj)
-               work->fence = radeon_fence_ref(work->new_rbo->tbo.sync_obj);
-       spin_unlock(&work->new_rbo->tbo.bdev->fence_lock);
-
-       /* We borrow the event spin lock for protecting flip_work */
-       spin_lock_irqsave(&crtc->dev->event_lock, flags);
 
-       if (radeon_crtc->flip_status != RADEON_FLIP_NONE) {
-               DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
-               spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
-               drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
-               radeon_fence_unref(&work->fence);
-               kfree(work);
-               return -EBUSY;
-       }
-       radeon_crtc->flip_status = RADEON_FLIP_PENDING;
-       radeon_crtc->flip_work = work;
-
-       /* update crtc fb */
-       crtc->primary->fb = fb;
-
-       spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
-
-       queue_work(radeon_crtc->flip_queue, &work->flip_work);
-
-       return 0;
+       return r;
 }
 
 static int
@@ -830,6 +829,10 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
        struct radeon_device *rdev = dev->dev_private;
        int ret = 0;
 
+       /* don't leak the edid if we already fetched it in detect() */
+       if (radeon_connector->edid)
+               goto got_edid;
+
        /* on hw with routers, select right port */
        if (radeon_connector->router.ddc_valid)
                radeon_router_select_ddc_port(radeon_connector);
@@ -868,6 +871,7 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
                        radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
        }
        if (radeon_connector->edid) {
+got_edid:
                drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
                ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
                drm_edid_to_eld(&radeon_connector->base, radeon_connector->edid);
index cb1421369e3a2eaa111a8c6d5f3b1e248e72beec..e9e361084249c7deddd0dd680ced4d95a845d31d 100644 (file)
@@ -173,7 +173,7 @@ int radeon_dpm = -1;
 int radeon_aspm = -1;
 int radeon_runtime_pm = -1;
 int radeon_hard_reset = 0;
-int radeon_vm_size = 4096;
+int radeon_vm_size = 4;
 int radeon_vm_block_size = 9;
 int radeon_deep_color = 0;
 
@@ -243,7 +243,7 @@ module_param_named(runpm, radeon_runtime_pm, int, 0444);
 MODULE_PARM_DESC(hard_reset, "PCI config reset (1 = force enable, 0 = disable (default))");
 module_param_named(hard_reset, radeon_hard_reset, int, 0444);
 
-MODULE_PARM_DESC(vm_size, "VM address space size in megabytes (default 4GB)");
+MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 4GB)");
 module_param_named(vm_size, radeon_vm_size, int, 0444);
 
 MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default 9)");
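
The vm_size module parameter above is now interpreted in gigabytes rather than
megabytes, so the new default of 4 still describes a 4 GB address space. As a
rough standalone sketch (not the driver's code, and assuming a 4 KiB GPU page
size purely for illustration), converting such a parameter to bytes and pages
looks like this:

    #include <stdint.h>
    #include <stdio.h>

    #define GPU_PAGE_SIZE 4096ULL   /* assumed page size, illustration only */

    int main(void)
    {
            unsigned int vm_size_gb = 4;    /* module parameter, in GiB */
            uint64_t bytes = (uint64_t)vm_size_gb << 30;
            uint64_t pages = bytes / GPU_PAGE_SIZE;

            printf("vm size: %u GiB = %llu bytes = %llu pages\n",
                   vm_size_gb, (unsigned long long)bytes,
                   (unsigned long long)pages);
            return 0;
    }
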
index 35d931881b4b80bb5f6989580e993af8796e7041..d25ae6acfd5a05d3bc3ab9775cc3dc604f27554a 100644 (file)
@@ -579,7 +579,7 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
        /* new gpu have virtual address space support */
        if (rdev->family >= CHIP_CAYMAN) {
                struct radeon_fpriv *fpriv;
-               struct radeon_bo_va *bo_va;
+               struct radeon_vm *vm;
                int r;
 
                fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
@@ -587,7 +587,8 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
                        return -ENOMEM;
                }
 
-               r = radeon_vm_init(rdev, &fpriv->vm);
+               vm = &fpriv->vm;
+               r = radeon_vm_init(rdev, vm);
                if (r) {
                        kfree(fpriv);
                        return r;
@@ -596,22 +597,23 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
                if (rdev->accel_working) {
                        r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
                        if (r) {
-                               radeon_vm_fini(rdev, &fpriv->vm);
+                               radeon_vm_fini(rdev, vm);
                                kfree(fpriv);
                                return r;
                        }
 
                        /* map the ib pool buffer read only into
                         * virtual address space */
-                       bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
-                                                rdev->ring_tmp_bo.bo);
-                       r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
+                       vm->ib_bo_va = radeon_vm_bo_add(rdev, vm,
+                                                       rdev->ring_tmp_bo.bo);
+                       r = radeon_vm_bo_set_addr(rdev, vm->ib_bo_va,
+                                                 RADEON_VA_IB_OFFSET,
                                                  RADEON_VM_PAGE_READABLE |
                                                  RADEON_VM_PAGE_SNOOPED);
 
                        radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
                        if (r) {
-                               radeon_vm_fini(rdev, &fpriv->vm);
+                               radeon_vm_fini(rdev, vm);
                                kfree(fpriv);
                                return r;
                        }
@@ -640,21 +642,19 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
        /* new gpu have virtual address space support */
        if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
                struct radeon_fpriv *fpriv = file_priv->driver_priv;
-               struct radeon_bo_va *bo_va;
+               struct radeon_vm *vm = &fpriv->vm;
                int r;
 
                if (rdev->accel_working) {
                        r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
                        if (!r) {
-                               bo_va = radeon_vm_bo_find(&fpriv->vm,
-                                                         rdev->ring_tmp_bo.bo);
-                               if (bo_va)
-                                       radeon_vm_bo_rmv(rdev, bo_va);
+                               if (vm->ib_bo_va)
+                                       radeon_vm_bo_rmv(rdev, vm->ib_bo_va);
                                radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
                        }
                }
 
-               radeon_vm_fini(rdev, &fpriv->vm);
+               radeon_vm_fini(rdev, vm);
                kfree(fpriv);
                file_priv->driver_priv = NULL;
        }
index eecff6bbd34145c6bf975ca0f2f19ff9af8534ba..725d3669014f8ef2bbd61f637d5e3e98de2772eb 100644 (file)
@@ -332,6 +332,7 @@ struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
        bo_va->ref_count = 1;
        INIT_LIST_HEAD(&bo_va->bo_list);
        INIT_LIST_HEAD(&bo_va->vm_list);
+       INIT_LIST_HEAD(&bo_va->vm_status);
 
        mutex_lock(&vm->mutex);
        list_add(&bo_va->vm_list, &vm->va);
@@ -468,6 +469,19 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
                head = &tmp->vm_list;
        }
 
+       if (bo_va->soffset) {
+               /* add a clone of the bo_va to clear the old address */
+               tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
+               if (!tmp) {
+                       mutex_unlock(&vm->mutex);
+                       return -ENOMEM;
+               }
+               tmp->soffset = bo_va->soffset;
+               tmp->eoffset = bo_va->eoffset;
+               tmp->vm = vm;
+               list_add(&tmp->vm_status, &vm->freed);
+       }
+
        bo_va->soffset = soffset;
        bo_va->eoffset = eoffset;
        bo_va->flags = flags;
@@ -823,25 +837,19 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
  * Objects have to be reserved and the mutex must be locked!
  */
 int radeon_vm_bo_update(struct radeon_device *rdev,
-                       struct radeon_vm *vm,
-                       struct radeon_bo *bo,
+                       struct radeon_bo_va *bo_va,
                        struct ttm_mem_reg *mem)
 {
+       struct radeon_vm *vm = bo_va->vm;
        struct radeon_ib ib;
-       struct radeon_bo_va *bo_va;
        unsigned nptes, ndw;
        uint64_t addr;
        int r;
 
-       bo_va = radeon_vm_bo_find(vm, bo);
-       if (bo_va == NULL) {
-               dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
-               return -EINVAL;
-       }
 
        if (!bo_va->soffset) {
                dev_err(rdev->dev, "bo %p doesn't have a mapping in vm %p\n",
-                       bo, vm);
+                       bo_va->bo, vm);
                return -EINVAL;
        }
 
@@ -868,7 +876,7 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
 
        trace_radeon_vm_bo_update(bo_va);
 
-       nptes = radeon_bo_ngpu_pages(bo);
+       nptes = (bo_va->eoffset - bo_va->soffset) / RADEON_GPU_PAGE_SIZE;
 
        /* padding, etc. */
        ndw = 64;
@@ -910,6 +918,34 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
        return 0;
 }
 
+/**
+ * radeon_vm_clear_freed - clear freed BOs in the PT
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ *
+ * Make sure all freed BOs are cleared in the PT.
+ * Returns 0 for success.
+ *
+ * PTs have to be reserved and mutex must be locked!
+ */
+int radeon_vm_clear_freed(struct radeon_device *rdev,
+                         struct radeon_vm *vm)
+{
+       struct radeon_bo_va *bo_va, *tmp;
+       int r;
+
+       list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
+               list_del(&bo_va->vm_status);
+               r = radeon_vm_bo_update(rdev, bo_va, NULL);
+               kfree(bo_va);
+               if (r)
+                       return r;
+       }
+       return 0;
+}
+
 /**
  * radeon_vm_bo_rmv - remove a bo to a specific vm
  *
@@ -917,27 +953,27 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
  * @bo_va: requested bo_va
  *
  * Remove @bo_va->bo from the requested vm (cayman+).
- * Remove @bo_va->bo from the list of bos associated with the bo_va->vm and
- * remove the ptes for @bo_va in the page table.
- * Returns 0 for success.
  *
  * Objects have to be reserved!
  */
-int radeon_vm_bo_rmv(struct radeon_device *rdev,
-                    struct radeon_bo_va *bo_va)
+void radeon_vm_bo_rmv(struct radeon_device *rdev,
+                     struct radeon_bo_va *bo_va)
 {
-       int r = 0;
+       struct radeon_vm *vm = bo_va->vm;
 
-       mutex_lock(&bo_va->vm->mutex);
-       if (bo_va->soffset)
-               r = radeon_vm_bo_update(rdev, bo_va->vm, bo_va->bo, NULL);
+       list_del(&bo_va->bo_list);
 
+       mutex_lock(&vm->mutex);
        list_del(&bo_va->vm_list);
-       mutex_unlock(&bo_va->vm->mutex);
-       list_del(&bo_va->bo_list);
 
-       kfree(bo_va);
-       return r;
+       if (bo_va->soffset) {
+               bo_va->bo = NULL;
+               list_add(&bo_va->vm_status, &vm->freed);
+       } else {
+               kfree(bo_va);
+       }
+
+       mutex_unlock(&vm->mutex);
 }
 
 /**
@@ -975,11 +1011,13 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
        int r;
 
        vm->id = 0;
+       vm->ib_bo_va = NULL;
        vm->fence = NULL;
        vm->last_flush = NULL;
        vm->last_id_use = NULL;
        mutex_init(&vm->mutex);
        INIT_LIST_HEAD(&vm->va);
+       INIT_LIST_HEAD(&vm->freed);
 
        pd_size = radeon_vm_directory_size(rdev);
        pd_entries = radeon_vm_num_pdes(rdev);
@@ -1034,7 +1072,8 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
                        kfree(bo_va);
                }
        }
-
+       list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status)
+               kfree(bo_va);
 
        for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
                radeon_bo_unref(&vm->page_tables[i].bo);
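
The radeon_vm changes above stop clearing page-table entries synchronously in
radeon_vm_bo_rmv(); instead a mapping with a valid offset is parked on the
per-VM "freed" list and cleaned up later by radeon_vm_clear_freed(). A hedged
userspace sketch of that deferred-free pattern, using a plain singly linked
list instead of the kernel's list helpers:

    #include <stdio.h>
    #include <stdlib.h>

    struct mapping {
            unsigned long start, end;
            struct mapping *next;
    };

    static struct mapping *freed_list;  /* stand-in for vm->freed */

    /* Park a mapping so its page-table range can be cleared later. */
    static void mapping_defer_free(struct mapping *m)
    {
            m->next = freed_list;
            freed_list = m;
    }

    /* Walk the freed list, "clear" each range, then release the entry. */
    static void clear_freed(void)
    {
            struct mapping *m, *next;

            for (m = freed_list; m; m = next) {
                    next = m->next;
                    printf("clearing PTEs for [0x%lx, 0x%lx)\n",
                           m->start, m->end);
                    free(m);
            }
            freed_list = NULL;
    }

    int main(void)
    {
            struct mapping *m = malloc(sizeof(*m));

            if (!m)
                    return 1;
            m->start = 0x1000;
            m->end = 0x2000;
            mapping_defer_free(m);
            clear_freed();
            return 0;
    }
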
index 237dd29d9f1c893b1e17600fec59660848b394c3..3e21e869015fece1124e7093bfd028e99be2e909 100644 (file)
@@ -406,8 +406,9 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
        for (i = 0; i < rdev->num_crtc; i++) {
                if (save->crtc_enabled[i]) {
                        tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i]);
-                       if ((tmp & 0x3) != 0) {
-                               tmp &= ~0x3;
+                       if ((tmp & 0x7) != 3) {
+                               tmp &= ~0x7;
+                               tmp |= 0x3;
                                WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
                        }
                        tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]);
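
The rv515_mc_resume() hunk above is a read-modify-write on a 3-bit register
field: instead of merely clearing the low bits, it now clears the whole field
and writes the value 3 back. A minimal sketch of the idiom, with the register
access faked by a local variable:

    #include <stdio.h>
    #include <stdint.h>

    #define FIELD_MASK  0x7u        /* 3-bit field */
    #define FIELD_WANT  0x3u        /* desired value */

    int main(void)
    {
            uint32_t reg = 0x5;     /* pretend this came from a register read */

            if ((reg & FIELD_MASK) != FIELD_WANT) {
                    reg &= ~FIELD_MASK;     /* clear the whole field */
                    reg |= FIELD_WANT;      /* then set the wanted value */
                    /* a real driver would write the register back here */
            }
            printf("register now %#x\n", (unsigned)reg);
            return 0;
    }
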
index eba0225259a457a3d494f39bfee13af0cdc865e6..9e854fd016dabac99c081896209ce9f732dc34f0 100644 (file)
@@ -6103,6 +6103,7 @@ static inline u32 si_get_ih_wptr(struct radeon_device *rdev)
                tmp = RREG32(IH_RB_CNTL);
                tmp |= IH_WPTR_OVERFLOW_CLEAR;
                WREG32(IH_RB_CNTL, tmp);
+               wptr &= ~RB_OVERFLOW;
        }
        return (wptr & rdev->ih.ptr_mask);
 }
index 20da6ff183df9b6fb5b59554616a30f28aeeb96f..32e50be9c4ac1c41969fd9de2d1a6a3439a6ef27 100644 (file)
@@ -1874,15 +1874,16 @@ int trinity_dpm_init(struct radeon_device *rdev)
        for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
                pi->at[i] = TRINITY_AT_DFLT;
 
-       /* There are stability issues reported on latops with
-        * bapm installed when switching between AC and battery
-        * power.  At the same time, some desktop boards hang
-        * if it's not enabled and dpm is enabled.
+       /* There are stability issues reported with
+        * bapm enabled when switching between AC and battery
+        * power.  At the same time, some MSI boards hang
+        * if it's not enabled and dpm is enabled.  Just enable
+        * it for MSI boards right now.
         */
-       if (rdev->flags & RADEON_IS_MOBILITY)
-               pi->enable_bapm = false;
-       else
+       if (rdev->pdev->subsystem_vendor == 0x1462)
                pi->enable_bapm = true;
+       else
+               pi->enable_bapm = false;
        pi->enable_nbps_policy = true;
        pi->enable_sclk_ds = true;
        pi->enable_gfx_power_gating = true;
index eaaa3d843b8052c4c0d5b513567da720c6cfbe7a..23b2ce294c4ca78ea3f961ee102550e4aeb02632 100644 (file)
@@ -246,8 +246,8 @@ void hv_fcopy_onchannelcallback(void *context)
                /*
                 * Send the information to the user-level daemon.
                 */
-               fcopy_send_data();
                schedule_delayed_work(&fcopy_work, 5*HZ);
+               fcopy_send_data();
                return;
        }
        icmsghdr->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE;
index 0f4dea5ccf171a8ce41a65aa3befead25c6f80ed..9ee3913850d682f5a2d2557e333a362833ace4b5 100644 (file)
@@ -515,7 +515,7 @@ static ssize_t set_temp_min(struct device *dev,
                return -EINVAL;
 
        temp = DIV_ROUND_CLOSEST(temp, 1000);
-       temp = clamp_val(temp, 0, 255);
+       temp = clamp_val(temp, -128, 127);
 
        mutex_lock(&data->lock);
        data->temp_min[attr->index] = temp;
@@ -549,7 +549,7 @@ static ssize_t set_temp_max(struct device *dev,
                return -EINVAL;
 
        temp = DIV_ROUND_CLOSEST(temp, 1000);
-       temp = clamp_val(temp, 0, 255);
+       temp = clamp_val(temp, -128, 127);
 
        mutex_lock(&data->lock);
        data->temp_max[attr->index] = temp;
@@ -826,7 +826,7 @@ static ssize_t set_pwm_tmin(struct device *dev,
                return -EINVAL;
 
        temp = DIV_ROUND_CLOSEST(temp, 1000);
-       temp = clamp_val(temp, 0, 255);
+       temp = clamp_val(temp, -128, 127);
 
        mutex_lock(&data->lock);
        data->pwm_tmin[attr->index] = temp;
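
The clamp changes above suggest these temperature limits live in a signed
8-bit register, so the millidegree input is clamped to [-128, 127] degrees
after division rather than to [0, 255]. A small sketch of the conversion, with
DIV_ROUND_CLOSEST and clamp_val approximated by userspace helpers:

    #include <stdio.h>

    /* simplified stand-ins for the kernel helpers of the same name */
    static long div_round_closest(long x, long d)
    {
            return (x + (x < 0 ? -d / 2 : d / 2)) / d;
    }

    static long clamp_val(long v, long lo, long hi)
    {
            return v < lo ? lo : (v > hi ? hi : v);
    }

    int main(void)
    {
            long millideg = -40500;         /* user input in millidegrees C */
            long reg = clamp_val(div_round_closest(millideg, 1000),
                                 -128, 127);

            printf("register value: %ld\n", reg);       /* -41, fits an s8 */
            return 0;
    }
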
index afd31042b452073e50c1227a5e9d3cde5086b8ff..d14ab3c45daa32c88c7423f8508b38ed015aad49 100644 (file)
@@ -194,7 +194,7 @@ static ssize_t da9052_hwmon_show_name(struct device *dev,
                                      struct device_attribute *devattr,
                                      char *buf)
 {
-       return sprintf(buf, "da9052-hwmon\n");
+       return sprintf(buf, "da9052\n");
 }
 
 static ssize_t show_label(struct device *dev,
index 73b3865f1207ada0b74949dba3d5e4636086c21b..35eb7738d7119cf2d706d5cd60323b04e9e76988 100644 (file)
@@ -204,7 +204,7 @@ static ssize_t da9055_hwmon_show_name(struct device *dev,
                                      struct device_attribute *devattr,
                                      char *buf)
 {
-       return sprintf(buf, "da9055-hwmon\n");
+       return sprintf(buf, "da9055\n");
 }
 
 static ssize_t show_label(struct device *dev,
index efee4c59239fcff8aa7b675c01cb5b9ac6bab5bd..34b9a601ad078c394513e67a5dccc345c6c02fef 100644 (file)
@@ -86,7 +86,7 @@ static inline u8 IN_TO_REG(unsigned long val, int n)
  */
 static inline s8 TEMP_TO_REG(int val)
 {
-       return clamp_val(SCALE(val, 1, 1000), -128000, 127000);
+       return SCALE(clamp_val(val, -128000, 127000), 1, 1000);
 }
 
 static inline int TEMP_FROM_REG(s8 val)
@@ -384,6 +384,8 @@ static ssize_t set_vrm(struct device *dev, struct device_attribute *attr,
        err = kstrtoul(buf, 10, &val);
        if (err)
                return err;
+       if (val > 255)
+               return -EINVAL;
 
        data->vrm = val;
        return count;
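
TEMP_TO_REG above now clamps the millidegree input to [-128000, 127000] before
scaling; clamping the already-scaled result against the millidegree bounds, as
the old code did, never limits it to the signed 8-bit register range. A short
illustration of why the order matters (the helper here is just for the
example):

    #include <stdio.h>

    static long clamp(long v, long lo, long hi)
    {
            return v < lo ? lo : (v > hi ? hi : v);
    }

    int main(void)
    {
            long val = 250000;      /* 250 degrees C in millidegrees */

            /* old order: scale, then clamp against millidegree bounds */
            long wrong = clamp(val / 1000, -128000, 127000);  /* 250 */

            /* new order: clamp to the representable range, then scale */
            long right = clamp(val, -128000, 127000) / 1000;  /* 127 */

            printf("wrong=%ld right=%ld\n", wrong, right);
            return 0;
    }

The value 250 would wrap when stored in a signed 8-bit register; 127 is the
largest limit the register can actually hold.
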
index 8fb46aab2d87d5bf85ddc2a1b68ca38341bcb058..a04c49f2a0118a887e22c16b9716656af6e87723 100644 (file)
@@ -416,6 +416,7 @@ config BLK_DEV_CY82C693
 
 config BLK_DEV_CS5520
        tristate "Cyrix CS5510/20 MediaGX chipset support (VERY EXPERIMENTAL)"
+       depends on X86_32 || COMPILE_TEST
        select BLK_DEV_IDEDMA_PCI
        help
          Include support for PIO tuning and virtual DMA on the Cyrix MediaGX
@@ -426,6 +427,7 @@ config BLK_DEV_CS5520
 
 config BLK_DEV_CS5530
        tristate "Cyrix/National Semiconductor CS5530 MediaGX chipset support"
+       depends on X86_32 || COMPILE_TEST
        select BLK_DEV_IDEDMA_PCI
        help
          Include support for UDMA on the Cyrix MediaGX 5530 chipset. This
@@ -435,7 +437,7 @@ config BLK_DEV_CS5530
 
 config BLK_DEV_CS5535
        tristate "AMD CS5535 chipset support"
-       depends on X86 && !X86_64
+       depends on X86_32
        select BLK_DEV_IDEDMA_PCI
        help
          Include support for UDMA on the NSC/AMD CS5535 companion chipset.
@@ -486,6 +488,7 @@ config BLK_DEV_JMICRON
 
 config BLK_DEV_SC1200
        tristate "National SCx200 chipset support"
+       depends on X86_32 || COMPILE_TEST
        select BLK_DEV_IDEDMA_PCI
        help
          This driver adds support for the on-board IDE controller on the
index 2a744a91370e640d1fe3603409b80e776d8041de..a3d3b1733c49c667f7f1495d9dfef23623e28fa0 100644 (file)
@@ -853,8 +853,9 @@ static int init_irq (ide_hwif_t *hwif)
        if (irq_handler == NULL)
                irq_handler = ide_intr;
 
-       if (request_irq(hwif->irq, irq_handler, sa, hwif->name, hwif))
-               goto out_up;
+       if (!host->get_lock)
+               if (request_irq(hwif->irq, irq_handler, sa, hwif->name, hwif))
+                       goto out_up;
 
 #if !defined(__mc68000__)
        printk(KERN_INFO "%s at 0x%03lx-0x%03lx,0x%03lx on irq %d", hwif->name,
@@ -1533,7 +1534,8 @@ static void ide_unregister(ide_hwif_t *hwif)
 
        ide_proc_unregister_port(hwif);
 
-       free_irq(hwif->irq, hwif);
+       if (!hwif->host->get_lock)
+               free_irq(hwif->irq, hwif);
 
        device_unregister(hwif->portdev);
        device_unregister(&hwif->gendev);
index 17aeea1705665f799990dd7ae69029c7cf09330c..2a5fa9a436e5cd2c7f495207717e0709774b8d48 100644 (file)
@@ -111,8 +111,14 @@ static const int mma8452_samp_freq[8][2] = {
        {6, 250000}, {1, 560000}
 };
 
+/*
+ * Hardware full scale is +/- 2g, 4g or 8g, corresponding to a raw value
+ * range of +/- 2048.  The userspace interface reports m/s^2 in micro
+ * units, so the scale factor is given by:
+ *     g * N * 1000000 / 2048 for N = 2, 4, 8 and g = 9.80665
+ */
 static const int mma8452_scales[3][2] = {
-       {0, 977}, {0, 1953}, {0, 3906}
+       {0, 9577}, {0, 19154}, {0, 38307}
 };
 
 static ssize_t mma8452_show_samp_freq_avail(struct device *dev,
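
The new scale table follows directly from the formula in the comment above:
with a +/- 2048 raw range, the micro m/s^2 per LSB comes out to roughly 9577,
19154 and 38307 for the 2g, 4g and 8g ranges. A quick standalone check:

    #include <stdio.h>

    int main(void)
    {
            const double g = 9.80665;
            int n;

            for (n = 2; n <= 8; n *= 2)
                    printf("%dg range: %.0f micro m/s^2 per LSB\n",
                           n, g * n * 1000000.0 / 2048.0);
            /* prints approximately 9577, 19154 and 38307 */
            return 0;
    }
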
index 258a973a1fb8da2d23457fe3afc139f0f9befcff..bfbf4d419f41c391ae4be154adaa125956af6558 100644 (file)
@@ -345,6 +345,9 @@ static int iio_device_add_event(struct iio_dev *indio_dev,
                        &indio_dev->event_interface->dev_attr_list);
                kfree(postfix);
 
+               if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE))
+                       continue;
+
                if (ret)
                        return ret;
 
index 5e153f6d4b48f2d36abcceec8dcee0996e5bf6d1..768a0fb67dd6d5995545f40037aa2c5157548ce1 100644 (file)
@@ -432,8 +432,17 @@ static void arp_failure_discard(void *handle, struct sk_buff *skb)
  */
 static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
 {
+       struct c4iw_ep *ep = handle;
+
        printk(KERN_ERR MOD "ARP failure during connect\n");
        kfree_skb(skb);
+       connect_reply_upcall(ep, -EHOSTUNREACH);
+       state_set(&ep->com, DEAD);
+       remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
+       cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
+       dst_release(ep->dst);
+       cxgb4_l2t_release(ep->l2t);
+       c4iw_put_ep(&ep->com);
 }
 
 /*
@@ -658,7 +667,7 @@ static int send_connect(struct c4iw_ep *ep)
                opt2 |= T5_OPT_2_VALID;
                opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
        }
-       t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure);
+       t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure);
 
        if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
                if (ep->com.remote_addr.ss_family == AF_INET) {
@@ -2180,7 +2189,6 @@ static void reject_cr(struct c4iw_dev *dev, u32 hwtid, struct sk_buff *skb)
        PDBG("%s c4iw_dev %p tid %u\n", __func__, dev, hwtid);
        BUG_ON(skb_cloned(skb));
        skb_trim(skb, sizeof(struct cpl_tid_release));
-       skb_get(skb);
        release_tid(&dev->rdev, hwtid, skb);
        return;
 }
@@ -3917,7 +3925,7 @@ int __init c4iw_cm_init(void)
        return 0;
 }
 
-void __exit c4iw_cm_term(void)
+void c4iw_cm_term(void)
 {
        WARN_ON(!list_empty(&timeout_list));
        flush_workqueue(workq);
index dd93aadc996e1ca15e0739a1d60e9c4930b449b8..7db82b24302b63c576b8873565782987f6d33e0e 100644 (file)
@@ -696,6 +696,7 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
                pr_err(MOD "error allocating status page\n");
                goto err4;
        }
+       rdev->status_page->db_off = 0;
        return 0;
 err4:
        c4iw_rqtpool_destroy(rdev);
@@ -729,7 +730,6 @@ static void c4iw_dealloc(struct uld_ctx *ctx)
        if (ctx->dev->rdev.oc_mw_kva)
                iounmap(ctx->dev->rdev.oc_mw_kva);
        ib_dealloc_device(&ctx->dev->ibdev);
-       iwpm_exit(RDMA_NL_C4IW);
        ctx->dev = NULL;
 }
 
@@ -826,12 +826,6 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
                setup_debugfs(devp);
        }
 
-       ret = iwpm_init(RDMA_NL_C4IW);
-       if (ret) {
-               pr_err("port mapper initialization failed with %d\n", ret);
-               ib_dealloc_device(&devp->ibdev);
-               return ERR_PTR(ret);
-       }
 
        return devp;
 }
@@ -1332,6 +1326,15 @@ static int __init c4iw_init_module(void)
                pr_err("%s[%u]: Failed to add netlink callback\n"
                       , __func__, __LINE__);
 
+       err = iwpm_init(RDMA_NL_C4IW);
+       if (err) {
+               pr_err("port mapper initialization failed with %d\n", err);
+               ibnl_remove_client(RDMA_NL_C4IW);
+               c4iw_cm_term();
+               debugfs_remove_recursive(c4iw_debugfs_root);
+               return err;
+       }
+
        cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info);
 
        return 0;
@@ -1349,6 +1352,7 @@ static void __exit c4iw_exit_module(void)
        }
        mutex_unlock(&dev_mutex);
        cxgb4_unregister_uld(CXGB4_ULD_RDMA);
+       iwpm_exit(RDMA_NL_C4IW);
        ibnl_remove_client(RDMA_NL_C4IW);
        c4iw_cm_term();
        debugfs_remove_recursive(c4iw_debugfs_root);
index 125bc5d1e175ba4b18085fa0323dd304689b504b..361fff7a07427197582348621d82c626629fbabf 100644 (file)
@@ -908,7 +908,7 @@ int c4iw_destroy_ctrl_qp(struct c4iw_rdev *rdev);
 int c4iw_register_device(struct c4iw_dev *dev);
 void c4iw_unregister_device(struct c4iw_dev *dev);
 int __init c4iw_cm_init(void);
-void __exit c4iw_cm_term(void);
+void c4iw_cm_term(void);
 void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
                               struct c4iw_dev_ucontext *uctx);
 void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
index d13ddf1c0033385f9b2d6b5dadac29d87aaeaf45..bbbcf389272cbde11ee754d3efacae0d3c700190 100644 (file)
@@ -675,7 +675,7 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
        int err;
 
        uuari = &dev->mdev.priv.uuari;
-       if (init_attr->create_flags & ~IB_QP_CREATE_SIGNATURE_EN)
+       if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN | IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK))
                return -EINVAL;
 
        if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
index 1c4c0db055509cc45df2e1865a5c0b6d86e25fc9..29ca0bb4f561e6af3960218d027bfd4ac6b300fa 100644 (file)
@@ -257,9 +257,10 @@ static int input_handle_abs_event(struct input_dev *dev,
 }
 
 static int input_get_disposition(struct input_dev *dev,
-                         unsigned int type, unsigned int code, int value)
+                         unsigned int type, unsigned int code, int *pval)
 {
        int disposition = INPUT_IGNORE_EVENT;
+       int value = *pval;
 
        switch (type) {
 
@@ -357,6 +358,7 @@ static int input_get_disposition(struct input_dev *dev,
                break;
        }
 
+       *pval = value;
        return disposition;
 }
 
@@ -365,7 +367,7 @@ static void input_handle_event(struct input_dev *dev,
 {
        int disposition;
 
-       disposition = input_get_disposition(dev, type, code, value);
+       disposition = input_get_disposition(dev, type, code, &value);
 
        if ((disposition & INPUT_PASS_TO_DEVICE) && dev->event)
                dev->event(dev, type, code, value);
index 758b48731415a26bceff0fdb81ddc08cec0cc2c7..de7be4f03d9193884d248d506b6f75a306dbc17a 100644 (file)
@@ -215,6 +215,7 @@ static int keyscan_probe(struct platform_device *pdev)
        return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int keyscan_suspend(struct device *dev)
 {
        struct platform_device *pdev = to_platform_device(dev);
@@ -249,6 +250,7 @@ static int keyscan_resume(struct device *dev)
        mutex_unlock(&input->mutex);
        return retval;
 }
+#endif
 
 static SIMPLE_DEV_PM_OPS(keyscan_dev_pm_ops, keyscan_suspend, keyscan_resume);
 
index e4104f9b2e6d7efb34e9e60812fdb154c90342f4..fed5102e1802075b6fd9318c2bd8aad044b317b0 100644 (file)
@@ -213,7 +213,7 @@ static struct platform_driver sirfsoc_pwrc_driver = {
 
 module_platform_driver(sirfsoc_pwrc_driver);
 
-MODULE_LICENSE("GPLv2");
+MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Binghua Duan <Binghua.Duan@csr.com>, Xianglong Du <Xianglong.Du@csr.com>");
 MODULE_DESCRIPTION("CSR Prima2 PWRC Driver");
 MODULE_ALIAS("platform:sirfsoc-pwrc");
index ec772d962f06800fc92915fa27d51b25fca1e56f..ef9e0b8a9aa754b6b49e26b7b050c2a7bcf1f078 100644 (file)
@@ -132,7 +132,8 @@ static const struct min_max_quirk min_max_pnpid_table[] = {
                1232, 5710, 1156, 4696
        },
        {
-               (const char * const []){"LEN0034", "LEN0036", "LEN2004", NULL},
+               (const char * const []){"LEN0034", "LEN0036", "LEN2002",
+                                       "LEN2004", NULL},
                1024, 5112, 2024, 4832
        },
        {
@@ -168,7 +169,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
        "LEN0049",
        "LEN2000",
        "LEN2001", /* Edge E431 */
-       "LEN2002",
+       "LEN2002", /* Edge E531 */
        "LEN2003",
        "LEN2004", /* L440 */
        "LEN2005",
index 381b20d4c5618d8fc7f5c3e616479d4663d9ae03..136b7b204f56c8d111812559384d6acd8e732514 100644 (file)
@@ -401,6 +401,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1360"),
                },
        },
+       {
+               /* Acer Aspire 5710 */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5710"),
+               },
+       },
        {
                /* Gericom Bellagio */
                .matches = {
index 977d05cd9e2ea707c08c5eb6d3c9a4fd1a07adae..e73cf2c71f355993c564cee0a05b7cbd34e5f435 100644 (file)
@@ -1217,9 +1217,9 @@ static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data)
                         * a=(pi*r^2)/C.
                         */
                        int a = data[5];
-                       int x_res  = input_abs_get_res(input, ABS_X);
-                       int y_res  = input_abs_get_res(input, ABS_Y);
-                       width  = 2 * int_sqrt(a * WACOM_CONTACT_AREA_SCALE);
+                       int x_res = input_abs_get_res(input, ABS_MT_POSITION_X);
+                       int y_res = input_abs_get_res(input, ABS_MT_POSITION_Y);
+                       width = 2 * int_sqrt(a * WACOM_CONTACT_AREA_SCALE);
                        height = width * y_res / x_res;
                }
 
@@ -1587,7 +1587,7 @@ static void wacom_abs_set_axis(struct input_dev *input_dev,
                input_abs_set_res(input_dev, ABS_X, features->x_resolution);
                input_abs_set_res(input_dev, ABS_Y, features->y_resolution);
        } else {
-               if (features->touch_max <= 2) {
+               if (features->touch_max == 1) {
                        input_set_abs_params(input_dev, ABS_X, 0,
                                features->x_max, features->x_fuzz, 0);
                        input_set_abs_params(input_dev, ABS_Y, 0,
@@ -1815,14 +1815,8 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
        case MTTPC:
        case MTTPC_B:
        case TABLETPC2FG:
-               if (features->device_type == BTN_TOOL_FINGER) {
-                       unsigned int flags = INPUT_MT_DIRECT;
-
-                       if (wacom_wac->features.type == TABLETPC2FG)
-                               flags = 0;
-
-                       input_mt_init_slots(input_dev, features->touch_max, flags);
-               }
+               if (features->device_type == BTN_TOOL_FINGER && features->touch_max > 1)
+                       input_mt_init_slots(input_dev, features->touch_max, INPUT_MT_DIRECT);
                /* fall through */
 
        case TABLETPC:
@@ -1883,10 +1877,6 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
                        __set_bit(BTN_RIGHT, input_dev->keybit);
 
                        if (features->touch_max) {
-                               /* touch interface */
-                               unsigned int flags = INPUT_MT_POINTER;
-
-                               __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
                                if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) {
                                        input_set_abs_params(input_dev,
                                                     ABS_MT_TOUCH_MAJOR,
@@ -1894,12 +1884,8 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
                                        input_set_abs_params(input_dev,
                                                     ABS_MT_TOUCH_MINOR,
                                                     0, features->y_max, 0, 0);
-                               } else {
-                                       __set_bit(BTN_TOOL_FINGER, input_dev->keybit);
-                                       __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
-                                       flags = 0;
                                }
-                               input_mt_init_slots(input_dev, features->touch_max, flags);
+                               input_mt_init_slots(input_dev, features->touch_max, INPUT_MT_POINTER);
                        } else {
                                /* buttons/keys only interface */
                                __clear_bit(ABS_X, input_dev->absbit);
index 4e793a17361f7b2e02955e7e6e02ece9151f8063..2ce649520fe0d67021509b7e829cf1dbb9858e19 100644 (file)
@@ -359,9 +359,12 @@ static int titsc_parse_dt(struct platform_device *pdev,
         */
        err = of_property_read_u32(node, "ti,coordinate-readouts",
                        &ts_dev->coordinate_readouts);
-       if (err < 0)
+       if (err < 0) {
+               dev_warn(&pdev->dev, "please use 'ti,coordinate-readouts' instead\n");
                err = of_property_read_u32(node, "ti,coordiante-readouts",
                                &ts_dev->coordinate_readouts);
+       }
+
        if (err < 0)
                return err;
 
index b99dd88e31b9b20b41bccde26fd54789a4d61250..bb446d742a2d81699824b17e5400fa9dc1eb9082 100644 (file)
@@ -170,10 +170,10 @@ int pamu_disable_liodn(int liodn)
 static unsigned int map_addrspace_size_to_wse(phys_addr_t addrspace_size)
 {
        /* Bug if not a power of 2 */
-       BUG_ON(!is_power_of_2(addrspace_size));
+       BUG_ON((addrspace_size & (addrspace_size - 1)));
 
        /* window size is 2^(WSE+1) bytes */
-       return __ffs(addrspace_size) - 1;
+       return fls64(addrspace_size) - 2;
 }
 
 /* Derive the PAACE window count encoding for the subwindow count */
@@ -351,7 +351,7 @@ int pamu_config_ppaace(int liodn, phys_addr_t win_addr, phys_addr_t win_size,
        struct paace *ppaace;
        unsigned long fspi;
 
-       if (!is_power_of_2(win_size) || win_size < PAMU_PAGE_SIZE) {
+       if ((win_size & (win_size - 1)) || win_size < PAMU_PAGE_SIZE) {
                pr_debug("window size too small or not a power of two %llx\n", win_size);
                return -EINVAL;
        }
@@ -464,7 +464,7 @@ int pamu_config_spaace(int liodn, u32 subwin_cnt, u32 subwin,
                return -ENOENT;
        }
 
-       if (!is_power_of_2(subwin_size) || subwin_size < PAMU_PAGE_SIZE) {
+       if ((subwin_size & (subwin_size - 1)) || subwin_size < PAMU_PAGE_SIZE) {
                pr_debug("subwindow size out of range, or not a power of 2\n");
                return -EINVAL;
        }
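
The PAMU hunks above replace is_power_of_2() and __ffs() with an open-coded
bit test and fls64(), presumably so the checks behave correctly for 64-bit
phys_addr_t sizes on 32-bit builds. A hedged userspace sketch of the two
idioms using compiler builtins:

    #include <stdio.h>
    #include <stdint.h>

    /* true when exactly one bit is set (and x != 0) */
    static int is_pow2(uint64_t x)
    {
            return x && !(x & (x - 1));
    }

    /* index of the highest set bit, 1-based, like the kernel's fls64() */
    static int fls64_like(uint64_t x)
    {
            return x ? 64 - __builtin_clzll(x) : 0;
    }

    int main(void)
    {
            uint64_t size = 1ULL << 32;     /* a 4 GiB window */

            if (is_pow2(size))
                    /* window size is 2^(WSE+1), so WSE = log2(size) - 1 */
                    printf("WSE encoding: %d\n", fls64_like(size) - 2);
            return 0;
    }
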
index 93072ba44b1d179dff9a486cd728cf16ea645691..af47648301a9eda024b842341e451469eada4c1c 100644 (file)
@@ -301,7 +301,7 @@ static int check_size(u64 size, dma_addr_t iova)
         * Size must be a power of two and at least be equal
         * to PAMU page size.
         */
-       if (!is_power_of_2(size) || size < PAMU_PAGE_SIZE) {
+       if ((size & (size - 1)) || size < PAMU_PAGE_SIZE) {
                pr_debug("%s: size too small or not a power of two\n", __func__);
                return -EINVAL;
        }
@@ -335,11 +335,6 @@ static struct fsl_dma_domain *iommu_alloc_dma_domain(void)
        return domain;
 }
 
-static inline struct device_domain_info *find_domain(struct device *dev)
-{
-       return dev->archdata.iommu_domain;
-}
-
 static void remove_device_ref(struct device_domain_info *info, u32 win_cnt)
 {
        unsigned long flags;
@@ -380,7 +375,7 @@ static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct d
         * Check here if the device is already attached to domain or not.
         * If the device is already attached to a domain detach it.
         */
-       old_domain_info = find_domain(dev);
+       old_domain_info = dev->archdata.iommu_domain;
        if (old_domain_info && old_domain_info->domain != dma_domain) {
                spin_unlock_irqrestore(&device_domain_lock, flags);
                detach_device(dev, old_domain_info->domain);
@@ -399,7 +394,7 @@ static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct d
         * the info for the first LIODN as all
         * LIODNs share the same domain
         */
-       if (!old_domain_info)
+       if (!dev->archdata.iommu_domain)
                dev->archdata.iommu_domain = info;
        spin_unlock_irqrestore(&device_domain_lock, flags);
 
@@ -1042,12 +1037,15 @@ root_bus:
                        group = get_shared_pci_device_group(pdev);
        }
 
+       if (!group)
+               group = ERR_PTR(-ENODEV);
+
        return group;
 }
 
 static int fsl_pamu_add_device(struct device *dev)
 {
-       struct iommu_group *group = NULL;
+       struct iommu_group *group = ERR_PTR(-ENODEV);
        struct pci_dev *pdev;
        const u32 *prop;
        int ret, len;
@@ -1070,7 +1068,7 @@ static int fsl_pamu_add_device(struct device *dev)
                        group = get_device_iommu_group(dev);
        }
 
-       if (!group || IS_ERR(group))
+       if (IS_ERR(group))
                return PTR_ERR(group);
 
        ret = iommu_group_add_device(group, dev);
index 7e11c9d6ae8c8411610987339dc161f208cf2ad2..7c131cf7cc1325bcbcc3d89248b5d66498434d2a 100644 (file)
@@ -42,6 +42,7 @@
 #include <linux/irqchip/chained_irq.h>
 #include <linux/irqchip/arm-gic.h>
 
+#include <asm/cputype.h>
 #include <asm/irq.h>
 #include <asm/exception.h>
 #include <asm/smp_plat.h>
@@ -954,7 +955,9 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
                }
 
                for_each_possible_cpu(cpu) {
-                       unsigned long offset = percpu_offset * cpu_logical_map(cpu);
+                       u32 mpidr = cpu_logical_map(cpu);
+                       u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+                       unsigned long offset = percpu_offset * core_id;
                        *per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset;
                        *per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset;
                }
@@ -1071,8 +1074,10 @@ gic_of_init(struct device_node *node, struct device_node *parent)
        gic_cnt++;
        return 0;
 }
+IRQCHIP_DECLARE(gic_400, "arm,gic-400", gic_of_init);
 IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init);
 IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init);
+IRQCHIP_DECLARE(cortex_a7_gic, "arm,cortex-a7-gic", gic_of_init);
 IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init);
 IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init);
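
The GIC change above derives the per-CPU banked register offset from affinity
level 0 of the MPIDR instead of the full logical map value, since on
multi-cluster systems the raw MPIDR also carries cluster bits in the upper
affinity fields. A tiny sketch of extracting level 0 (the mask here is an
assumption for illustration, not taken from the kernel header):

    #include <stdio.h>
    #include <stdint.h>

    /* MPIDR affinity level 0 lives in bits [7:0] */
    #define MPIDR_AFF0(mpidr)   ((mpidr) & 0xff)

    int main(void)
    {
            uint32_t mpidr = 0x100;         /* cluster 1, core 0 */
            unsigned long percpu_offset = 0x1000;

            printf("core id %u -> offset %#lx\n",
                   (unsigned)MPIDR_AFF0(mpidr),
                   percpu_offset * MPIDR_AFF0(mpidr));
            return 0;
    }
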
 
index 0df6691d045c298906c95c1b29415ac93ef24878..8dc791bfaa6fd0b62d9e37e80429cdcdc6d72ebd 100644 (file)
@@ -2059,13 +2059,17 @@ static int l3ni1_cmd_global(struct PStack *st, isdn_ctrl *ic)
                        memcpy(p, ic->parm.ni1_io.data, ic->parm.ni1_io.datalen); /* copy data */
                        l = (p - temp) + ic->parm.ni1_io.datalen; /* total length */
 
-                       if (ic->parm.ni1_io.timeout > 0)
-                               if (!(pc = ni1_new_l3_process(st, -1)))
-                               { free_invoke_id(st, id);
+                       if (ic->parm.ni1_io.timeout > 0) {
+                               pc = ni1_new_l3_process(st, -1);
+                               if (!pc) {
+                                       free_invoke_id(st, id);
                                        return (-2);
                                }
-                       pc->prot.ni1.ll_id = ic->parm.ni1_io.ll_id; /* remember id */
-                       pc->prot.ni1.proc = ic->parm.ni1_io.proc; /* and procedure */
+                               /* remember id */
+                               pc->prot.ni1.ll_id = ic->parm.ni1_io.ll_id;
+                               /* and procedure */
+                               pc->prot.ni1.proc = ic->parm.ni1_io.proc;
+                       }
 
                        if (!(skb = l3_alloc_skb(l)))
                        { free_invoke_id(st, id);
index 61ac6323744602ff27a95b56042c19d5d4e527e1..62f0688d45a500530fd65014432be9fbfd5a7d69 100644 (file)
@@ -442,7 +442,7 @@ static int get_filter(void __user *arg, struct sock_filter **p)
 {
        struct sock_fprog uprog;
        struct sock_filter *code = NULL;
-       int len, err;
+       int len;
 
        if (copy_from_user(&uprog, arg, sizeof(uprog)))
                return -EFAULT;
@@ -458,12 +458,6 @@ static int get_filter(void __user *arg, struct sock_filter **p)
        if (IS_ERR(code))
                return PTR_ERR(code);
 
-       err = sk_chk_filter(code, uprog.len);
-       if (err) {
-               kfree(code);
-               return err;
-       }
-
        *p = code;
        return uprog.len;
 }
@@ -644,9 +638,15 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg)
                fprog.len = len;
                fprog.filter = code;
 
-               if (is->pass_filter)
+               if (is->pass_filter) {
                        sk_unattached_filter_destroy(is->pass_filter);
-               err = sk_unattached_filter_create(&is->pass_filter, &fprog);
+                       is->pass_filter = NULL;
+               }
+               if (fprog.filter != NULL)
+                       err = sk_unattached_filter_create(&is->pass_filter,
+                                                         &fprog);
+               else
+                       err = 0;
                kfree(code);
 
                return err;
@@ -663,9 +663,15 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg)
                fprog.len = len;
                fprog.filter = code;
 
-               if (is->active_filter)
+               if (is->active_filter) {
                        sk_unattached_filter_destroy(is->active_filter);
-               err = sk_unattached_filter_create(&is->active_filter, &fprog);
+                       is->active_filter = NULL;
+               }
+               if (fprog.filter != NULL)
+                       err = sk_unattached_filter_create(&is->active_filter,
+                                                         &fprog);
+               else
+                       err = 0;
                kfree(code);
 
                return err;
index 4ead4ba606562b0c33d750064660a4effe0594ea..d2899e7eb3aaf317a93d91978936dac5e3c7f132 100644 (file)
@@ -425,6 +425,15 @@ static int __open_metadata(struct dm_cache_metadata *cmd)
 
        disk_super = dm_block_data(sblock);
 
+       /* Verify the data block size hasn't changed */
+       if (le32_to_cpu(disk_super->data_block_size) != cmd->data_block_size) {
+               DMERR("changing the data block size (from %u to %llu) is not supported",
+                     le32_to_cpu(disk_super->data_block_size),
+                     (unsigned long long)cmd->data_block_size);
+               r = -EINVAL;
+               goto bad;
+       }
+
        r = __check_incompat_features(disk_super, cmd);
        if (r < 0)
                goto bad;
index b086a945edcbccc5106f267aa06109420c69cbd1..e9d33ad59df5e21a9fcdae85e96a27ccb93156cd 100644 (file)
@@ -613,6 +613,15 @@ static int __open_metadata(struct dm_pool_metadata *pmd)
 
        disk_super = dm_block_data(sblock);
 
+       /* Verify the data block size hasn't changed */
+       if (le32_to_cpu(disk_super->data_block_size) != pmd->data_block_size) {
+               DMERR("changing the data block size (from %u to %llu) is not supported",
+                     le32_to_cpu(disk_super->data_block_size),
+                     (unsigned long long)pmd->data_block_size);
+               r = -EINVAL;
+               goto bad_unlock_sblock;
+       }
+
        r = __check_incompat_features(disk_super, pmd);
        if (r < 0)
                goto bad_unlock_sblock;
index 8637d2ed76238b1c76c95d6b6bf2f994f19a3562..2e3cdcfa0a675b2f8835fde25924d262f6ed6264 100644 (file)
@@ -60,7 +60,7 @@ static int si2168_cmd_execute(struct si2168 *s, struct si2168_cmd *cmd)
                                jiffies_to_msecs(jiffies) -
                                (jiffies_to_msecs(timeout) - TIMEOUT));
 
-               if (!(cmd->args[0] >> 7) & 0x01) {
+               if (!((cmd->args[0] >> 7) & 0x01)) {
                        ret = -ETIMEDOUT;
                        goto err_mutex_unlock;
                }
@@ -485,20 +485,6 @@ static int si2168_init(struct dvb_frontend *fe)
        if (ret)
                goto err;
 
-       cmd.args[0] = 0x05;
-       cmd.args[1] = 0x00;
-       cmd.args[2] = 0xaa;
-       cmd.args[3] = 0x4d;
-       cmd.args[4] = 0x56;
-       cmd.args[5] = 0x40;
-       cmd.args[6] = 0x00;
-       cmd.args[7] = 0x00;
-       cmd.wlen = 8;
-       cmd.rlen = 1;
-       ret = si2168_cmd_execute(s, &cmd);
-       if (ret)
-               goto err;
-
        /* cold state - try to download firmware */
        dev_info(&s->client->dev, "%s: found a '%s' in cold state\n",
                        KBUILD_MODNAME, si2168_ops.info.name);
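
The one-line condition fix at the top of this file is about precedence: the
logical-not binds tighter than the bitwise-and, so !(x >> 7) & 0x01 negates
first and masks afterwards. With an 8-bit value shifted by 7 the two forms
happen to give the same answer, but the parenthesised version states the
intent and avoids the trap whenever the shifted value can exceed 1, as this
small demo shows:

    #include <stdio.h>

    int main(void)
    {
            unsigned int status = 0x04;     /* bit 2 set, bit 1 clear */

            /* intended test: "is bit 1 clear?" */
            int buggy = !(status >> 1) & 0x01;      /* !(2) & 1 -> 0 */
            int fixed = !((status >> 1) & 0x01);    /* !(2 & 1)  -> 1 */

            printf("buggy=%d fixed=%d\n", buggy, fixed);
            return 0;
    }
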
index 2a343e896f4038d66f62ba255886a09add151b16..53f7f06ae3437aca8981f8d63f8e6538f7a77978 100644 (file)
@@ -22,7 +22,7 @@
 #include <linux/firmware.h>
 #include <linux/i2c-mux.h>
 
-#define SI2168_FIRMWARE "dvb-demod-si2168-01.fw"
+#define SI2168_FIRMWARE "dvb-demod-si2168-02.fw"
 
 /* state struct */
 struct si2168 {
index 522fe00f5eee167f6fa59a657aaad1b4dce55783..9619be5d48271827b28052c080f01fc5631224b7 100644 (file)
@@ -668,6 +668,7 @@ static int tda10071_set_frontend(struct dvb_frontend *fe)
        struct dtv_frontend_properties *c = &fe->dtv_property_cache;
        int ret, i;
        u8 mode, rolloff, pilot, inversion, div;
+       fe_modulation_t modulation;
 
        dev_dbg(&priv->i2c->dev,
                        "%s: delivery_system=%d modulation=%d frequency=%d symbol_rate=%d inversion=%d pilot=%d rolloff=%d\n",
@@ -702,10 +703,13 @@ static int tda10071_set_frontend(struct dvb_frontend *fe)
 
        switch (c->delivery_system) {
        case SYS_DVBS:
+               modulation = QPSK;
                rolloff = 0;
                pilot = 2;
                break;
        case SYS_DVBS2:
+               modulation = c->modulation;
+
                switch (c->rolloff) {
                case ROLLOFF_20:
                        rolloff = 2;
@@ -750,7 +754,7 @@ static int tda10071_set_frontend(struct dvb_frontend *fe)
 
        for (i = 0, mode = 0xff; i < ARRAY_SIZE(TDA10071_MODCOD); i++) {
                if (c->delivery_system == TDA10071_MODCOD[i].delivery_system &&
-                       c->modulation == TDA10071_MODCOD[i].modulation &&
+                       modulation == TDA10071_MODCOD[i].modulation &&
                        c->fec_inner == TDA10071_MODCOD[i].fec) {
                        mode = TDA10071_MODCOD[i].val;
                        dev_dbg(&priv->i2c->dev, "%s: mode found=%02x\n",
@@ -834,10 +838,10 @@ static int tda10071_get_frontend(struct dvb_frontend *fe)
 
        switch ((buf[1] >> 0) & 0x01) {
        case 0:
-               c->inversion = INVERSION_OFF;
+               c->inversion = INVERSION_ON;
                break;
        case 1:
-               c->inversion = INVERSION_ON;
+               c->inversion = INVERSION_OFF;
                break;
        }
 
@@ -856,7 +860,7 @@ static int tda10071_get_frontend(struct dvb_frontend *fe)
        if (ret)
                goto error;
 
-       c->symbol_rate = (buf[0] << 16) | (buf[1] << 8) | (buf[2] << 0);
+       c->symbol_rate = ((buf[0] << 16) | (buf[1] << 8) | (buf[2] << 0)) * 1000;
 
        return ret;
 error:
index 4baf14bfb65a6d6ca4a485861492efcdc955f006..42048619273682089808eb1bffd1690704f7d13a 100644 (file)
@@ -55,6 +55,7 @@ static struct tda10071_modcod {
        { SYS_DVBS2, QPSK,  FEC_8_9,  0x0a },
        { SYS_DVBS2, QPSK,  FEC_9_10, 0x0b },
        /* 8PSK */
+       { SYS_DVBS2, PSK_8, FEC_AUTO, 0x00 },
        { SYS_DVBS2, PSK_8, FEC_3_5,  0x0c },
        { SYS_DVBS2, PSK_8, FEC_2_3,  0x0d },
        { SYS_DVBS2, PSK_8, FEC_3_4,  0x0e },
index e65c760e4e8bbc5df3da9d4fa970d003d6e0cc66..0006d6bf8c18a5015dea50cbd4e0386802b80bc6 100644 (file)
@@ -179,7 +179,7 @@ static const struct v4l2_file_operations ts_fops =
        .read     = vb2_fop_read,
        .poll     = vb2_fop_poll,
        .mmap     = vb2_fop_mmap,
-       .ioctl    = video_ioctl2,
+       .unlocked_ioctl = video_ioctl2,
 };
 
 static const struct v4l2_ioctl_ops ts_ioctl_ops = {
index a7ed16497903ef171caf128697900aa9700b3915..1e4ec697fb1054c69becb4f4483f228508c2efe7 100644 (file)
@@ -269,6 +269,7 @@ err:
                list_del(&buf->list);
                vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
        }
+       spin_unlock_irqrestore(&common->irqlock, flags);
 
        return ret;
 }
index 5bb085b19bcbdad375ed9bbb52ab13851580127c..b431b58f39e3648d7d33ae6e049f1896ccd3fedc 100644 (file)
@@ -233,6 +233,7 @@ err:
                list_del(&buf->list);
                vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
        }
+       spin_unlock_irqrestore(&common->irqlock, flags);
 
        return ret;
 }
index 271a752cee5415a825fed6af3e2e125d1ebf343a..fa4cc7b880aa4774f4fdf6d7c6cf3ddbb010dbd8 100644 (file)
@@ -57,7 +57,7 @@ static int si2157_cmd_execute(struct si2157 *s, struct si2157_cmd *cmd)
                        jiffies_to_msecs(jiffies) -
                        (jiffies_to_msecs(timeout) - TIMEOUT));
 
-       if (!(buf[0] >> 7) & 0x01) {
+       if (!((buf[0] >> 7) & 0x01)) {
                ret = -ETIMEDOUT;
                goto err_mutex_unlock;
        } else {
index 021e4d35e4d7d5f65311c13bb70c33df07b40d3f..7b9b75f6077471aea8209f317bbd7a636f8e8b58 100644 (file)
@@ -704,15 +704,41 @@ static int af9035_read_config(struct dvb_usb_device *d)
                if (ret < 0)
                        goto err;
 
-               if (tmp == 0x00)
-                       dev_dbg(&d->udev->dev,
-                                       "%s: [%d]tuner not set, using default\n",
-                                       __func__, i);
-               else
+               dev_dbg(&d->udev->dev, "%s: [%d]tuner=%02x\n",
+                               __func__, i, tmp);
+
+               /* tuner sanity check */
+               if (state->chip_type == 0x9135) {
+                       if (state->chip_version == 0x02) {
+                               /* IT9135 BX (v2) */
+                               switch (tmp) {
+                               case AF9033_TUNER_IT9135_60:
+                               case AF9033_TUNER_IT9135_61:
+                               case AF9033_TUNER_IT9135_62:
+                                       state->af9033_config[i].tuner = tmp;
+                                       break;
+                               }
+                       } else {
+                               /* IT9135 AX (v1) */
+                               switch (tmp) {
+                               case AF9033_TUNER_IT9135_38:
+                               case AF9033_TUNER_IT9135_51:
+                               case AF9033_TUNER_IT9135_52:
+                                       state->af9033_config[i].tuner = tmp;
+                                       break;
+                               }
+                       }
+               } else {
+                       /* AF9035 */
                        state->af9033_config[i].tuner = tmp;
+               }
 
-               dev_dbg(&d->udev->dev, "%s: [%d]tuner=%02x\n",
-                               __func__, i, state->af9033_config[i].tuner);
+               if (state->af9033_config[i].tuner != tmp) {
+                       dev_info(&d->udev->dev,
+                                       "%s: [%d] overriding tuner from %02x to %02x\n",
+                                       KBUILD_MODNAME, i, tmp,
+                                       state->af9033_config[i].tuner);
+               }
 
                switch (state->af9033_config[i].tuner) {
                case AF9033_TUNER_TUA9001:
index 2fd1c5e31a0f2692a1d59dd88c72cbee74e61054..339adce7c7a50974ffe9c5a2a185ae26fe8956c6 100644 (file)
@@ -928,6 +928,7 @@ static const struct usb_device_id device_table[] = {
        {USB_DEVICE(0x093a, 0x2620)},
        {USB_DEVICE(0x093a, 0x2621)},
        {USB_DEVICE(0x093a, 0x2622), .driver_info = FL_VFLIP},
+       {USB_DEVICE(0x093a, 0x2623), .driver_info = FL_VFLIP},
        {USB_DEVICE(0x093a, 0x2624), .driver_info = FL_VFLIP},
        {USB_DEVICE(0x093a, 0x2625)},
        {USB_DEVICE(0x093a, 0x2626)},
index 0500c4175d5f095d3b27322f298aeb7130ab95e7..6bce01a674f9e12f9874636e962378f6965a4fad 100644 (file)
@@ -82,7 +82,7 @@ static void hdpvr_read_bulk_callback(struct urb *urb)
 }
 
 /*=========================================================================*/
-/* bufffer bits */
+/* buffer bits */
 
 /* function expects dev->io_mutex to be held by the caller */
 int hdpvr_cancel_queue(struct hdpvr_device *dev)
@@ -926,7 +926,7 @@ static int hdpvr_s_ctrl(struct v4l2_ctrl *ctrl)
        case V4L2_CID_MPEG_AUDIO_ENCODING:
                if (dev->flags & HDPVR_FLAG_AC3_CAP) {
                        opt->audio_codec = ctrl->val;
-                       return hdpvr_set_audio(dev, opt->audio_input,
+                       return hdpvr_set_audio(dev, opt->audio_input + 1,
                                              opt->audio_codec);
                }
                return 0;
@@ -1198,7 +1198,7 @@ int hdpvr_register_videodev(struct hdpvr_device *dev, struct device *parent,
        v4l2_ctrl_new_std_menu(hdl, &hdpvr_ctrl_ops,
                V4L2_CID_MPEG_AUDIO_ENCODING,
                ac3 ? V4L2_MPEG_AUDIO_ENCODING_AC3 : V4L2_MPEG_AUDIO_ENCODING_AAC,
-               0x7, V4L2_MPEG_AUDIO_ENCODING_AAC);
+               0x7, ac3 ? dev->options.audio_codec : V4L2_MPEG_AUDIO_ENCODING_AAC);
        v4l2_ctrl_new_std_menu(hdl, &hdpvr_ctrl_ops,
                V4L2_CID_MPEG_VIDEO_ENCODING,
                V4L2_MPEG_VIDEO_ENCODING_MPEG_4_AVC, 0x3,
index 4ae54caadd0375b786bf50147c6499e892790263..ce1c9f5d9dee1040c43f798b5163c4821feabbfd 100644 (file)
@@ -610,10 +610,10 @@ struct v4l2_fract v4l2_calc_aspect_ratio(u8 hor_landscape, u8 vert_portrait)
                aspect.denominator = 9;
        } else if (ratio == 34) {
                aspect.numerator = 4;
-               aspect.numerator = 3;
+               aspect.denominator = 3;
        } else if (ratio == 68) {
                aspect.numerator = 15;
-               aspect.numerator = 9;
+               aspect.denominator = 9;
        } else {
                aspect.numerator = hor_landscape + 99;
                aspect.denominator = 100;
index e4ec355704a6c94488f57b567c93a75301d8fa7a..a7543ba3e19041e5aad85c6718b856a07f8f9726 100644 (file)
 /* Atmel chips */
 #define AT49BV640D     0x02de
 #define AT49BV640DT    0x02db
+/* Sharp chips */
+#define LH28F640BFHE_PTTL90    0x00b0
+#define LH28F640BFHE_PBTL90    0x00b1
+#define LH28F640BFHE_PTTL70A   0x00b2
+#define LH28F640BFHE_PBTL70A   0x00b3
 
 static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
 static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
@@ -258,6 +263,36 @@ static void fixup_st_m28w320cb(struct mtd_info *mtd)
                (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
 };
 
+static int is_LH28F640BF(struct cfi_private *cfi)
+{
+       /* Sharp LH28F640BF Family */
+       if (cfi->mfr == CFI_MFR_SHARP && (
+           cfi->id == LH28F640BFHE_PTTL90 || cfi->id == LH28F640BFHE_PBTL90 ||
+           cfi->id == LH28F640BFHE_PTTL70A || cfi->id == LH28F640BFHE_PBTL70A))
+               return 1;
+       return 0;
+}
+
+static void fixup_LH28F640BF(struct mtd_info *mtd)
+{
+       struct map_info *map = mtd->priv;
+       struct cfi_private *cfi = map->fldrv_priv;
+       struct cfi_pri_intelext *extp = cfi->cmdset_priv;
+
+       /* Reset the Partition Configuration Register on LH28F640BF
+        * to a single partition (PCR = 0x000): PCR is embedded into A0-A15. */
+       if (is_LH28F640BF(cfi)) {
+               printk(KERN_INFO "Reset Partition Config. Register: 1 Partition of 4 planes\n");
+               map_write(map, CMD(0x60), 0);
+               map_write(map, CMD(0x04), 0);
+
+               /* We have set one single partition thus
+                * Simultaneous Operations are not allowed */
+               printk(KERN_INFO "cfi_cmdset_0001: Simultaneous Operations disabled\n");
+               extp->FeatureSupport &= ~512;
+       }
+}
+
 static void fixup_use_point(struct mtd_info *mtd)
 {
        struct map_info *map = mtd->priv;
@@ -309,6 +344,8 @@ static struct cfi_fixup cfi_fixup_table[] = {
        { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct },
        { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb },
        { CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock },
+       { CFI_MFR_SHARP, CFI_ID_ANY, fixup_unlock_powerup_lock },
+       { CFI_MFR_SHARP, CFI_ID_ANY, fixup_LH28F640BF },
        { 0, 0, NULL }
 };
 
@@ -1649,6 +1686,12 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
        initial_adr = adr;
        cmd_adr = adr & ~(wbufsize-1);
 
+       /* Sharp LH28F640BF chips need the first address for the
+        * Page Buffer Program command. See Table 5 of
+        * LH28F320BF, LH28F640BF, LH28F128BF Series (Appendix FUM00701) */
+       if (is_LH28F640BF(cfi))
+               cmd_adr = adr;
+
        /* Let's determine this according to the interleave only once */
        write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0xe8) : CMD(0xe9);
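
The new Sharp entries above reuse the driver's fixup-table pattern: each { mfr, id, fn } row is matched against the probed chip, with CFI_ID_ANY acting as a wildcard, which is why fixup_LH28F640BF() still guards itself with is_LH28F640BF(). A rough standalone sketch of that matching logic (the wildcard value, manufacturer code, and function names here are made up, not the kernel's):

#include <stdio.h>

#define ID_ANY 0xffff	/* illustrative wildcard, stands in for CFI_ID_ANY */

struct chip { unsigned mfr, id; };
struct fixup {
	unsigned mfr, id;
	void (*fn)(struct chip *);
};

static void fix_sharp(struct chip *c)
{
	printf("applying Sharp fixup to %04x:%04x\n", c->mfr, c->id);
}

/* Walk the table and run every entry whose mfr/id match the chip;
 * ID_ANY matches any device id, mirroring how the kernel table is used. */
static void run_fixups(const struct fixup *t, struct chip *c)
{
	for (; t->fn; t++)
		if (t->mfr == c->mfr && (t->id == ID_ANY || t->id == c->id))
			t->fn(c);
}

int main(void)
{
	static const struct fixup table[] = {
		{ 0x1234, ID_ANY, fix_sharp },	/* hypothetical manufacturer code */
		{ 0, 0, NULL }
	};
	struct chip c = { 0x1234, 0x00b1 };

	run_fixups(table, &c);
	return 0;
}
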
 
index 7df86948e6d4029649c93118002afaee4ea98f5b..b4f61c7fc161c6dc32e080e8328ca78f96fa1093 100644 (file)
@@ -475,6 +475,7 @@ static int elm_context_save(struct elm_info *info)
                                        ELM_SYNDROME_FRAGMENT_1 + offset);
                        regs->elm_syndrome_fragment_0[i] = elm_read_reg(info,
                                        ELM_SYNDROME_FRAGMENT_0 + offset);
+                       break;
                default:
                        return -EINVAL;
                }
@@ -520,6 +521,7 @@ static int elm_context_restore(struct elm_info *info)
                                        regs->elm_syndrome_fragment_1[i]);
                        elm_write_reg(info, ELM_SYNDROME_FRAGMENT_0 + offset,
                                        regs->elm_syndrome_fragment_0[i]);
+                       break;
                default:
                        return -EINVAL;
                }
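
Both ELM hunks add a missing break before default:, so the syndrome-fragment case no longer falls through into the error return. A tiny standalone illustration of why the fall-through matters (the names are made up; only the control flow mirrors the hunks):

#include <stdio.h>

/* Without the break, the valid case falls through into default and the
 * function reports -EINVAL even though it did the work. */
static int save_context(int ecc_steps, int fixed)
{
	switch (ecc_steps) {
	case 4:
	case 8:
		/* ...save registers here... */
		if (fixed)
			break;		/* with the fix: fall out of the switch */
		/* fall through: original behaviour */
	default:
		return -22;		/* -EINVAL */
	}
	return 0;
}

int main(void)
{
	printf("buggy: %d\n", save_context(8, 0));	/* -22 */
	printf("fixed: %d\n", save_context(8, 1));	/* 0 */
	return 0;
}
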
index 41167e9e991e4e50c1e1c2b72dbc468d654db22d..4f3e80c68a266243bad7e37aefbb819a797c6e77 100644 (file)
@@ -4047,8 +4047,10 @@ int nand_scan_tail(struct mtd_info *mtd)
                ecc->layout->oobavail += ecc->layout->oobfree[i].length;
        mtd->oobavail = ecc->layout->oobavail;
 
-       /* ECC sanity check: warn noisily if it's too weak */
-       WARN_ON(!nand_ecc_strength_good(mtd));
+       /* ECC sanity check: warn if it's too weak */
+       if (!nand_ecc_strength_good(mtd))
+               pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
+                       mtd->name);
 
        /*
         * Set the number of read / write steps for one page depending on ECC
index b04e7d059888d3fd36c82cdba3b9863cb25901e1..0431b46d9fd9e0211a8c7f5e865721a6b32895b5 100644 (file)
@@ -125,7 +125,7 @@ static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
                parent = *p;
                av = rb_entry(parent, struct ubi_ainf_volume, rb);
 
-               if (vol_id < av->vol_id)
+               if (vol_id > av->vol_id)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
@@ -423,7 +423,7 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
                                pnum, err);
                        ret = err > 0 ? UBI_BAD_FASTMAP : err;
                        goto out;
-               } else if (ret == UBI_IO_BITFLIPS)
+               } else if (err == UBI_IO_BITFLIPS)
                        scrub = 1;
 
                /*
index 3a451b6cd3d50844fb8fd2e21e3c0305ea0ad1ff..701f86cd5993246633b9be643f0801ba6c71b79d 100644 (file)
@@ -4068,7 +4068,7 @@ static int bond_check_params(struct bond_params *params)
        }
 
        if (ad_select) {
-               bond_opt_initstr(&newval, lacp_rate);
+               bond_opt_initstr(&newval, ad_select);
                valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_SELECT),
                                        &newval);
                if (!valptr) {
index 141160ef249ae83e9d1fe8672dca3926ea9b7172..5776e503e4c57eb374e304fecc8e0fa44e2e5f85 100644 (file)
@@ -654,13 +654,13 @@ static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
 
        work_done = bcm_sysport_tx_reclaim(ring->priv, ring);
 
-       if (work_done < budget) {
+       if (work_done == 0) {
                napi_complete(napi);
                /* re-enable TX interrupt */
                intrl2_1_mask_clear(ring->priv, BIT(ring->index));
        }
 
-       return work_done;
+       return 0;
 }
 
 static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
@@ -1254,28 +1254,17 @@ static inline void umac_enable_set(struct bcm_sysport_priv *priv,
                usleep_range(1000, 2000);
 }
 
-static inline int umac_reset(struct bcm_sysport_priv *priv)
+static inline void umac_reset(struct bcm_sysport_priv *priv)
 {
-       unsigned int timeout = 0;
        u32 reg;
-       int ret = 0;
-
-       umac_writel(priv, 0, UMAC_CMD);
-       while (timeout++ < 1000) {
-               reg = umac_readl(priv, UMAC_CMD);
-               if (!(reg & CMD_SW_RESET))
-                       break;
-
-               udelay(1);
-       }
-
-       if (timeout == 1000) {
-               dev_err(&priv->pdev->dev,
-                       "timeout waiting for MAC to come out of reset\n");
-               ret = -ETIMEDOUT;
-       }
 
-       return ret;
+       reg = umac_readl(priv, UMAC_CMD);
+       reg |= CMD_SW_RESET;
+       umac_writel(priv, reg, UMAC_CMD);
+       udelay(10);
+       reg = umac_readl(priv, UMAC_CMD);
+       reg &= ~CMD_SW_RESET;
+       umac_writel(priv, reg, UMAC_CMD);
 }
 
 static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
@@ -1303,11 +1292,7 @@ static int bcm_sysport_open(struct net_device *dev)
        int ret;
 
        /* Reset UniMAC */
-       ret = umac_reset(priv);
-       if (ret) {
-               netdev_err(dev, "UniMAC reset failed\n");
-               return ret;
-       }
+       umac_reset(priv);
 
        /* Flush TX and RX FIFOs at TOPCTRL level */
        topctrl_flush(priv);
@@ -1589,12 +1574,6 @@ static int bcm_sysport_probe(struct platform_device *pdev)
        BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
        dev->needed_headroom += sizeof(struct bcm_tsb);
 
-       /* We are interfaced to a switch which handles the multicast
-        * filtering for us, so we do not support programming any
-        * multicast hash table in this Ethernet MAC.
-        */
-       dev->flags &= ~IFF_MULTICAST;
-
        /* libphy will adjust the link state accordingly */
        netif_carrier_off(dev);
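
The rewritten umac_reset() above no longer waits for a self-clearing bit; it pulses the software-reset bit itself (set, short delay, clear), so the timeout path and error return disappear. A stripped-down standalone sketch of that pulse shape (the bit position and register helpers are assumptions, not the driver's):

#include <stdio.h>

#define CMD_SW_RESET (1u << 13)	/* assumed bit position, illustrative only */

static unsigned umac_cmd;	/* stand-in for the UMAC_CMD register */

static unsigned rd(void)        { return umac_cmd; }
static void     wr(unsigned v)  { umac_cmd = v; }
static void     delay_us(int n) { (void)n; /* would be udelay(n) */ }

/* Pulse the software-reset bit: set it, give the block time to settle,
 * then clear it.  Nothing can time out, so the helper returns void. */
static void umac_reset(void)
{
	wr(rd() | CMD_SW_RESET);
	delay_us(10);
	wr(rd() & ~CMD_SW_RESET);
}

int main(void)
{
	umac_reset();
	printf("UMAC_CMD = 0x%x\n", rd());	/* reset bit cleared again */
	return 0;
}
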
 
index 47c5814114e1764145f18c9c2088e8395214602e..4b875da1c7ed2afc0eec3854217b4919797f3e20 100644 (file)
@@ -797,7 +797,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 
                return;
        }
-       bnx2x_frag_free(fp, new_data);
+       if (new_data)
+               bnx2x_frag_free(fp, new_data);
 drop:
        /* drop the packet and keep the buffer in the bin */
        DP(NETIF_MSG_RX_STATUS,
index 2887034523e065a362dded7cf025742de29539b3..6a8b1453a1b96e80bc9e58eef13787fdaa6afe5e 100644 (file)
@@ -12937,7 +12937,7 @@ static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, int cnic_cnt)
         * without the default SB.
         * For VFs there is no default SB, then we return (index+1).
         */
-       pci_read_config_word(pdev, pdev->msix_cap + PCI_MSI_FLAGS, &control);
+       pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &control);
 
        index = control & PCI_MSIX_FLAGS_QSIZE;
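
The one-character fix above reads the Message Control word from the MSI-X capability (PCI_MSIX_FLAGS) rather than the MSI one, since only the MSI-X word carries the table-size field masked out on the next line. A hedged sketch of the size calculation (the 0x7ff mask and the "size minus one" encoding follow the PCI spec as I recall it; treat the constant as an assumption):

#include <stdio.h>
#include <stdint.h>

#define MSIX_FLAGS_QSIZE 0x07ff	/* assumed value of PCI_MSIX_FLAGS_QSIZE */

/* MSI-X Message Control encodes (table size - 1) in its low 11 bits. */
static unsigned msix_table_size(uint16_t control)
{
	return (control & MSIX_FLAGS_QSIZE) + 1;
}

int main(void)
{
	printf("%u vectors\n", msix_table_size(0x003f));	/* 64 */
	return 0;
}
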
 
index 5ba1cfbd60da3555878fa8fd467c3a9a36c03642..16281ad2da12c04ee8324ec85835b541479788c5 100644 (file)
@@ -1408,13 +1408,6 @@ static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv)
                if (cb->skb)
                        continue;
 
-               /* set the DMA descriptor length once and for all
-                * it will only change if we support dynamically sizing
-                * priv->rx_buf_len, but we do not
-                */
-               dmadesc_set_length_status(priv, priv->rx_bd_assign_ptr,
-                               priv->rx_buf_len << DMA_BUFLENGTH_SHIFT);
-
                ret = bcmgenet_rx_refill(priv, cb);
                if (ret)
                        break;
@@ -2535,14 +2528,17 @@ static int bcmgenet_probe(struct platform_device *pdev)
        netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
        netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);
 
-       err = register_netdev(dev);
-       if (err)
-               goto err_clk_disable;
+       /* libphy will determine the link state */
+       netif_carrier_off(dev);
 
        /* Turn off the main clock, WOL clock is handled separately */
        if (!IS_ERR(priv->clk))
                clk_disable_unprepare(priv->clk);
 
+       err = register_netdev(dev);
+       if (err)
+               goto err;
+
        return err;
 
 err_clk_disable:
index 0f117105fed1664a33d0af4325c2b24caa34d86f..e23c993b13625bca2af3addbde66609c2f3202f0 100644 (file)
@@ -331,9 +331,9 @@ struct bcmgenet_mib_counters {
 #define  EXT_ENERGY_DET_MASK           (1 << 12)
 
 #define EXT_RGMII_OOB_CTRL             0x0C
-#define  RGMII_MODE_EN                 (1 << 0)
 #define  RGMII_LINK                    (1 << 4)
 #define  OOB_DISABLE                   (1 << 5)
+#define  RGMII_MODE_EN                 (1 << 6)
 #define  ID_MODE_DIS                   (1 << 16)
 
 #define EXT_GPHY_CTRL                  0x1C
index 34a26e42f19d39b66b7b644ea296a58413f5e691..1e187fb760f80fce4099f779ee44abafc10b22f2 100644 (file)
@@ -2902,7 +2902,7 @@ static int be_open(struct net_device *netdev)
        for_all_evt_queues(adapter, eqo, i) {
                napi_enable(&eqo->napi);
                be_enable_busy_poll(eqo);
-               be_eq_notify(adapter, eqo->q.id, true, false, 0);
+               be_eq_notify(adapter, eqo->q.id, true, true, 0);
        }
        adapter->flags |= BE_FLAGS_NAPI_ENABLED;
 
index fab39e2954410106f9c26304f73c29169c1d35a1..36fc429298e353191cc93fa10140e298bc7cff85 100644 (file)
@@ -2990,11 +2990,11 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
        if (ug_info->rxExtendedFiltering) {
                size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
                if (ug_info->largestexternallookupkeysize ==
-                   QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
+                   QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
                        size +=
                            THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8;
                if (ug_info->largestexternallookupkeysize ==
-                   QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
+                   QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
                        size +=
                            THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16;
        }
index a2db388cc31e6018c892ddb2932e95ba869a09ec..ee74f9536b31b9d71471a351d7b8ad96c375da41 100644 (file)
@@ -1481,6 +1481,13 @@ static s32 igb_init_hw_82575(struct e1000_hw *hw)
        s32 ret_val;
        u16 i, rar_count = mac->rar_entry_count;
 
+       if ((hw->mac.type >= e1000_i210) &&
+           !(igb_get_flash_presence_i210(hw))) {
+               ret_val = igb_pll_workaround_i210(hw);
+               if (ret_val)
+                       return ret_val;
+       }
+
        /* Initialize identification LED */
        ret_val = igb_id_led_init(hw);
        if (ret_val) {
index 2a8bb35c2df2bb2824015768af176abe5dfeebae..217f8138851bf3e229d6a0035b442e0cc3ae8175 100644 (file)
 #define E1000_CTRL_EXT_SDP3_DIR  0x00000800 /* SDP3 Data direction */
 
 /* Physical Func Reset Done Indication */
-#define E1000_CTRL_EXT_PFRSTD    0x00004000
-#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
-#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES  0x00C00000
-#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX  0x00400000
-#define E1000_CTRL_EXT_LINK_MODE_SGMII   0x00800000
-#define E1000_CTRL_EXT_LINK_MODE_GMII   0x00000000
-#define E1000_CTRL_EXT_EIAME          0x01000000
-#define E1000_CTRL_EXT_IRCA           0x00000001
+#define E1000_CTRL_EXT_PFRSTD  0x00004000
+#define E1000_CTRL_EXT_SDLPE   0X00040000  /* SerDes Low Power Enable */
+#define E1000_CTRL_EXT_LINK_MODE_MASK  0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES   0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX   0x00400000
+#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000
+#define E1000_CTRL_EXT_LINK_MODE_GMII  0x00000000
+#define E1000_CTRL_EXT_EIAME   0x01000000
+#define E1000_CTRL_EXT_IRCA            0x00000001
 /* Interrupt delay cancellation */
 /* Driver loaded bit for FW */
 #define E1000_CTRL_EXT_DRV_LOAD       0x10000000
@@ -62,6 +63,7 @@
 /* packet buffer parity error detection enabled */
 /* descriptor FIFO parity error detection enable */
 #define E1000_CTRL_EXT_PBA_CLR         0x80000000 /* PBA Clear */
+#define E1000_CTRL_EXT_PHYPDEN         0x00100000
 #define E1000_I2CCMD_REG_ADDR_SHIFT    16
 #define E1000_I2CCMD_PHY_ADDR_SHIFT    24
 #define E1000_I2CCMD_OPCODE_READ       0x08000000
index 89925e4058498ea1c1ffda3195576d8abcda611e..ce55ea5d750cd7edb69d90a6f6ff9a13e8e47f3f 100644 (file)
@@ -567,4 +567,7 @@ struct net_device *igb_get_hw_dev(struct e1000_hw *hw);
 /* These functions must be implemented by drivers */
 s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
 s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
+
+void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
+void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
 #endif /* _E1000_HW_H_ */
index 337161f440dd67aa473f616621933332955a842c..65d931669f813bbcca0a21cc13a68c53663b03ee 100644 (file)
@@ -834,3 +834,69 @@ s32 igb_init_nvm_params_i210(struct e1000_hw *hw)
        }
        return ret_val;
 }
+
+/**
+ * igb_pll_workaround_i210
+ * @hw: pointer to the HW structure
+ *
+ * Works around an errata in the PLL circuit where it occasionally
+ * provides the wrong clock frequency after power up.
+ **/
+s32 igb_pll_workaround_i210(struct e1000_hw *hw)
+{
+       s32 ret_val;
+       u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val;
+       u16 nvm_word, phy_word, pci_word, tmp_nvm;
+       int i;
+
+       /* Get and set needed register values */
+       wuc = rd32(E1000_WUC);
+       mdicnfg = rd32(E1000_MDICNFG);
+       reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO;
+       wr32(E1000_MDICNFG, reg_val);
+
+       /* Get data from NVM, or set default */
+       ret_val = igb_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD,
+                                         &nvm_word);
+       if (ret_val)
+               nvm_word = E1000_INVM_DEFAULT_AL;
+       tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL;
+       for (i = 0; i < E1000_MAX_PLL_TRIES; i++) {
+               /* check current state directly from internal PHY */
+               igb_read_phy_reg_gs40g(hw, (E1000_PHY_PLL_FREQ_PAGE |
+                                        E1000_PHY_PLL_FREQ_REG), &phy_word);
+               if ((phy_word & E1000_PHY_PLL_UNCONF)
+                   != E1000_PHY_PLL_UNCONF) {
+                       ret_val = 0;
+                       break;
+               } else {
+                       ret_val = -E1000_ERR_PHY;
+               }
+               /* directly reset the internal PHY */
+               ctrl = rd32(E1000_CTRL);
+               wr32(E1000_CTRL, ctrl|E1000_CTRL_PHY_RST);
+
+               ctrl_ext = rd32(E1000_CTRL_EXT);
+               ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE);
+               wr32(E1000_CTRL_EXT, ctrl_ext);
+
+               wr32(E1000_WUC, 0);
+               reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16);
+               wr32(E1000_EEARBC_I210, reg_val);
+
+               igb_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
+               pci_word |= E1000_PCI_PMCSR_D3;
+               igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
+               usleep_range(1000, 2000);
+               pci_word &= ~E1000_PCI_PMCSR_D3;
+               igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
+               reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16);
+               wr32(E1000_EEARBC_I210, reg_val);
+
+               /* restore WUC register */
+               wr32(E1000_WUC, wuc);
+       }
+       /* restore MDICNFG setting */
+       wr32(E1000_MDICNFG, mdicnfg);
+       return ret_val;
+}
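
The workaround added above is a bounded retry loop: read the PHY's PLL state, and if it still reports unconfigured, reset the PHY, bounce the function through D3 and back, rewrite the autoload word, and try again up to E1000_MAX_PLL_TRIES times. A stripped-down standalone sketch of that retry shape (the probe and reset callbacks are placeholders, not the driver's register accesses):

#include <stdio.h>

#define MAX_TRIES 5

/* Placeholder hardware hooks; in the driver these are register accesses. */
static int pll_is_configured(int attempt) { return attempt >= 2; }
static void reset_phy(void) { }

/* Retry until the PLL reports a sane clock or we run out of attempts. */
static int pll_workaround(void)
{
	int i, err = -1;

	for (i = 0; i < MAX_TRIES; i++) {
		if (pll_is_configured(i)) {
			err = 0;
			break;
		}
		reset_phy();	/* reset and loop to re-check the state */
	}
	return err;
}

int main(void)
{
	printf("pll_workaround() = %d\n", pll_workaround());
	return 0;
}
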
index 9f34976687baedc7eb4d4844678cb2592c10e9d1..3442b6357d01211d9edd310f1b8339202ae5af19 100644 (file)
@@ -33,6 +33,7 @@ s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data);
 s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data);
 s32 igb_init_nvm_params_i210(struct e1000_hw *hw);
 bool igb_get_flash_presence_i210(struct e1000_hw *hw);
+s32 igb_pll_workaround_i210(struct e1000_hw *hw);
 
 #define E1000_STM_OPCODE               0xDB00
 #define E1000_EEPROM_FLASH_SIZE_WORD   0x11
@@ -78,4 +79,15 @@ enum E1000_INVM_STRUCTURE_TYPE {
 #define NVM_LED_1_CFG_DEFAULT_I211     0x0184
 #define NVM_LED_0_2_CFG_DEFAULT_I211   0x200C
 
+/* PLL Defines */
+#define E1000_PCI_PMCSR                        0x44
+#define E1000_PCI_PMCSR_D3             0x03
+#define E1000_MAX_PLL_TRIES            5
+#define E1000_PHY_PLL_UNCONF           0xFF
+#define E1000_PHY_PLL_FREQ_PAGE                0xFC0000
+#define E1000_PHY_PLL_FREQ_REG         0x000E
+#define E1000_INVM_DEFAULT_AL          0x202F
+#define E1000_INVM_AUTOLOAD            0x0A
+#define E1000_INVM_PLL_WO_VAL          0x0010
+
 #endif
index 1cc4b1a7e597d32823ff2cc221aa54ce370a5a15..f5ba4e4eafb9ce54aa5b0ccd5478fe13814b39a0 100644 (file)
@@ -66,6 +66,7 @@
 #define E1000_PBA      0x01000  /* Packet Buffer Allocation - RW */
 #define E1000_PBS      0x01008  /* Packet Buffer Size */
 #define E1000_EEMNGCTL 0x01010  /* MNG EEprom Control */
+#define E1000_EEARBC_I210 0x12024  /* EEPROM Auto Read Bus Control */
 #define E1000_EEWR     0x0102C  /* EEPROM Write Register - RW */
 #define E1000_I2CCMD   0x01028  /* SFPI2C Command Register - RW */
 #define E1000_FRTIMER  0x01048  /* Free Running Timer - RW */
index f145adbb55ac011905636ac17492c55c519e2789..a9537ba7a5a072630acc8842b875d98428922bb5 100644 (file)
@@ -7215,6 +7215,20 @@ static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
        }
 }
 
+void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+       struct igb_adapter *adapter = hw->back;
+
+       pci_read_config_word(adapter->pdev, reg, value);
+}
+
+void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+       struct igb_adapter *adapter = hw->back;
+
+       pci_write_config_word(adapter->pdev, reg, *value);
+}
+
 s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
 {
        struct igb_adapter *adapter = hw->back;
@@ -7578,6 +7592,8 @@ static int igb_sriov_reinit(struct pci_dev *dev)
 
        if (netif_running(netdev))
                igb_close(netdev);
+       else
+               igb_reset(adapter);
 
        igb_clear_interrupt_scheme(adapter);
 
index 45beca17fa50a3d1e4f920ad44097a3df3d73d07..dadd9a5f6323c5915be126aff248631e3fedc46e 100644 (file)
@@ -1207,7 +1207,7 @@ static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
        command =  l3_offs    << MVNETA_TX_L3_OFF_SHIFT;
        command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;
 
-       if (l3_proto == swab16(ETH_P_IP))
+       if (l3_proto == htons(ETH_P_IP))
                command |= MVNETA_TXD_IP_CSUM;
        else
                command |= MVNETA_TX_L3_IP6;
@@ -2529,7 +2529,7 @@ static void mvneta_adjust_link(struct net_device *ndev)
 
                        if (phydev->speed == SPEED_1000)
                                val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
-                       else
+                       else if (phydev->speed == SPEED_100)
                                val |= MVNETA_GMAC_CONFIG_MII_SPEED;
 
                        mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
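
The checksum hunk above swaps swab16() for htons(): l3_proto is a network-order (big-endian) field, so comparing it against htons(ETH_P_IP) is correct on any host, while an unconditional byte swap only happens to give the same result on little-endian machines. A small standalone sketch of the difference (0x0800 is ETH_P_IP):

#include <stdio.h>
#include <arpa/inet.h>

#define ETH_P_IP 0x0800

/* Unconditional byte swap, like the kernel's swab16(). */
static unsigned short swab16(unsigned short v)
{
	return (unsigned short)((v << 8) | (v >> 8));
}

int main(void)
{
	/* Protocol field as seen in a received frame: network byte order. */
	unsigned short l3_proto = htons(ETH_P_IP);

	/* Correct on any host: both sides are in network order. */
	printf("htons compare:  %s\n",
	       l3_proto == htons(ETH_P_IP) ? "match" : "no match");

	/* Only matches on little-endian hosts, where swab16 == htons. */
	printf("swab16 compare: %s\n",
	       l3_proto == swab16(ETH_P_IP) ? "match" : "no match");
	return 0;
}
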
index 80f725228f5b7c8ab836f8bccd747fdc006782c6..56022d6478370d9b8d71c84c3b2220fa9aa73180 100644 (file)
@@ -294,8 +294,6 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
        init_completion(&cq->free);
 
        cq->irq = priv->eq_table.eq[cq->vector].irq;
-       cq->irq_affinity_change = false;
-
        return 0;
 
 err_radix:
index 4b2130760eede3ad5f655cf08f2614d033c8ecb8..82322b1c8411b80ff9d15f8c8022a3095f69c4d0 100644 (file)
@@ -128,11 +128,16 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
                                        mlx4_warn(mdev, "Failed assigning an EQ to %s, falling back to legacy EQ's\n",
                                                  name);
                                }
+
                        }
                } else {
                        cq->vector = (cq->ring + 1 + priv->port) %
                                mdev->dev->caps.num_comp_vectors;
                }
+
+               cq->irq_desc =
+                       irq_to_desc(mlx4_eq_get_irq(mdev->dev,
+                                                   cq->vector));
        } else {
                /* For TX we use the same irq per
                ring we assigned for the RX    */
@@ -187,8 +192,6 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
        mlx4_en_unmap_buffer(&cq->wqres.buf);
        mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
        if (priv->mdev->dev->caps.comp_pool && cq->vector) {
-               if (!cq->is_tx)
-                       irq_set_affinity_hint(cq->mcq.irq, NULL);
                mlx4_release_eq(priv->mdev->dev, cq->vector);
        }
        cq->vector = 0;
@@ -204,6 +207,7 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
        if (!cq->is_tx) {
                napi_hash_del(&cq->napi);
                synchronize_rcu();
+               irq_set_affinity_hint(cq->mcq.irq, NULL);
        }
        netif_napi_del(&cq->napi);
 
index fa1a069e14e6f3ef21485172d612548034e00262..68d763d2d030d578e38ba970ea9a744a5bad4248 100644 (file)
@@ -417,6 +417,8 @@ static int mlx4_en_get_coalesce(struct net_device *dev,
 
        coal->tx_coalesce_usecs = priv->tx_usecs;
        coal->tx_max_coalesced_frames = priv->tx_frames;
+       coal->tx_max_coalesced_frames_irq = priv->tx_work_limit;
+
        coal->rx_coalesce_usecs = priv->rx_usecs;
        coal->rx_max_coalesced_frames = priv->rx_frames;
 
@@ -426,6 +428,7 @@ static int mlx4_en_get_coalesce(struct net_device *dev,
        coal->rx_coalesce_usecs_high = priv->rx_usecs_high;
        coal->rate_sample_interval = priv->sample_interval;
        coal->use_adaptive_rx_coalesce = priv->adaptive_rx_coal;
+
        return 0;
 }
 
@@ -434,6 +437,9 @@ static int mlx4_en_set_coalesce(struct net_device *dev,
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
 
+       if (!coal->tx_max_coalesced_frames_irq)
+               return -EINVAL;
+
        priv->rx_frames = (coal->rx_max_coalesced_frames ==
                           MLX4_EN_AUTO_CONF) ?
                                MLX4_EN_RX_COAL_TARGET :
@@ -457,6 +463,7 @@ static int mlx4_en_set_coalesce(struct net_device *dev,
        priv->rx_usecs_high = coal->rx_coalesce_usecs_high;
        priv->sample_interval = coal->rate_sample_interval;
        priv->adaptive_rx_coal = coal->use_adaptive_rx_coalesce;
+       priv->tx_work_limit = coal->tx_max_coalesced_frames_irq;
 
        return mlx4_en_moderation_update(priv);
 }
index 7d4fb7bf25933ddcddecebf0a551bbe4cfa6328a..7345c43b019e52e9e45ab3f05b769b618f17f468 100644 (file)
@@ -2336,7 +2336,7 @@ static void mlx4_en_add_vxlan_port(struct  net_device *dev,
        struct mlx4_en_priv *priv = netdev_priv(dev);
        __be16 current_port;
 
-       if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS))
+       if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
                return;
 
        if (sa_family == AF_INET6)
@@ -2473,6 +2473,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
                        MLX4_WQE_CTRL_SOLICITED);
        priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
        priv->tx_ring_num = prof->tx_ring_num;
+       priv->tx_work_limit = MLX4_EN_DEFAULT_TX_WORK;
 
        priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
                                GFP_KERNEL);
index d2d415732d994178117eafb39cc6d8207ce5322e..5535862f27cc57c0dbb0c9b972e020477e858c6a 100644 (file)
@@ -40,6 +40,7 @@
 #include <linux/if_ether.h>
 #include <linux/if_vlan.h>
 #include <linux/vmalloc.h>
+#include <linux/irq.h>
 
 #include "mlx4_en.h"
 
@@ -782,6 +783,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
                                                             PKT_HASH_TYPE_L3);
 
                                        skb_record_rx_queue(gro_skb, cq->ring);
+                                       skb_mark_napi_id(gro_skb, &cq->napi);
 
                                        if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
                                                timestamp = mlx4_en_get_cqe_ts(cqe);
@@ -896,16 +898,25 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
 
        /* If we used up all the quota - we're probably not done yet... */
        if (done == budget) {
+               int cpu_curr;
+               const struct cpumask *aff;
+
                INC_PERF_COUNTER(priv->pstats.napi_quota);
-               if (unlikely(cq->mcq.irq_affinity_change)) {
-                       cq->mcq.irq_affinity_change = false;
+
+               cpu_curr = smp_processor_id();
+               aff = irq_desc_get_irq_data(cq->irq_desc)->affinity;
+
+               if (unlikely(!cpumask_test_cpu(cpu_curr, aff))) {
+                       /* Current cpu is not according to smp_irq_affinity -
+                        * probably affinity changed. need to stop this NAPI
+                        * poll, and restart it on the right CPU
+                        */
                        napi_complete(napi);
                        mlx4_en_arm_cq(priv, cq);
                        return 0;
                }
        } else {
                /* Done for now */
-               cq->mcq.irq_affinity_change = false;
                napi_complete(napi);
                mlx4_en_arm_cq(priv, cq);
        }
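
Instead of the removed affinity-notifier machinery, the RX poll above now checks directly whether it is still running on a CPU in the IRQ's affinity mask; if not, it completes NAPI so the next interrupt restarts polling on the right CPU. A simplified standalone sketch of that check, with the cpumask reduced to a plain bitmask (everything here is illustrative):

#include <stdio.h>

/* Pretend affinity mask: bit N set means CPU N may service this IRQ. */
static unsigned long irq_affinity = 0x2;	/* only CPU 1 */

static int cpu_in_mask(int cpu, unsigned long mask)
{
	return (mask >> cpu) & 1;
}

/* Mirrors the shape of the hunk: if the polling CPU is no longer in the
 * IRQ's affinity mask, stop this poll so it can resume on the right CPU. */
static int poll(int current_cpu, int budget, int done)
{
	if (done == budget && !cpu_in_mask(current_cpu, irq_affinity)) {
		printf("cpu %d not in affinity mask, completing NAPI\n",
		       current_cpu);
		return 0;	/* napi_complete() + re-arm in the driver */
	}
	return done;
}

int main(void)
{
	printf("poll on cpu 0 -> %d\n", poll(0, 64, 64));
	printf("poll on cpu 1 -> %d\n", poll(1, 64, 64));
	return 0;
}
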
index 8be7483f82368c7733e4251019672d0cd227be08..5045bab596338c390277255ed33a9a5ff8939c5a 100644 (file)
@@ -351,9 +351,8 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
        return cnt;
 }
 
-static int mlx4_en_process_tx_cq(struct net_device *dev,
-                                struct mlx4_en_cq *cq,
-                                int budget)
+static bool mlx4_en_process_tx_cq(struct net_device *dev,
+                                struct mlx4_en_cq *cq)
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_cq *mcq = &cq->mcq;
@@ -372,9 +371,10 @@ static int mlx4_en_process_tx_cq(struct net_device *dev,
        int factor = priv->cqe_factor;
        u64 timestamp = 0;
        int done = 0;
+       int budget = priv->tx_work_limit;
 
        if (!priv->port_up)
-               return 0;
+               return true;
 
        index = cons_index & size_mask;
        cqe = &buf[(index << factor) + factor];
@@ -447,7 +447,7 @@ static int mlx4_en_process_tx_cq(struct net_device *dev,
                netif_tx_wake_queue(ring->tx_queue);
                ring->wake_queue++;
        }
-       return done;
+       return done < budget;
 }
 
 void mlx4_en_tx_irq(struct mlx4_cq *mcq)
@@ -467,24 +467,16 @@ int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget)
        struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
        struct net_device *dev = cq->dev;
        struct mlx4_en_priv *priv = netdev_priv(dev);
-       int done;
+       int clean_complete;
 
-       done = mlx4_en_process_tx_cq(dev, cq, budget);
+       clean_complete = mlx4_en_process_tx_cq(dev, cq);
+       if (!clean_complete)
+               return budget;
 
-       /* If we used up all the quota - we're probably not done yet... */
-       if (done < budget) {
-               /* Done for now */
-               cq->mcq.irq_affinity_change = false;
-               napi_complete(napi);
-               mlx4_en_arm_cq(priv, cq);
-               return done;
-       } else if (unlikely(cq->mcq.irq_affinity_change)) {
-               cq->mcq.irq_affinity_change = false;
-               napi_complete(napi);
-               mlx4_en_arm_cq(priv, cq);
-               return 0;
-       }
-       return budget;
+       napi_complete(napi);
+       mlx4_en_arm_cq(priv, cq);
+
+       return 0;
 }
 
 static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
index d954ec1eac173752e23e57653ccd4d2cae2de944..2a004b347e1dd896f4b20c9cd36178c0e1f7bfb5 100644 (file)
@@ -53,11 +53,6 @@ enum {
        MLX4_EQ_ENTRY_SIZE      = 0x20
 };
 
-struct mlx4_irq_notify {
-       void *arg;
-       struct irq_affinity_notify notify;
-};
-
 #define MLX4_EQ_STATUS_OK         ( 0 << 28)
 #define MLX4_EQ_STATUS_WRITE_FAIL  (10 << 28)
 #define MLX4_EQ_OWNER_SW          ( 0 << 24)
@@ -1088,57 +1083,6 @@ static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
        iounmap(priv->clr_base);
 }
 
-static void mlx4_irq_notifier_notify(struct irq_affinity_notify *notify,
-                                    const cpumask_t *mask)
-{
-       struct mlx4_irq_notify *n = container_of(notify,
-                                                struct mlx4_irq_notify,
-                                                notify);
-       struct mlx4_priv *priv = (struct mlx4_priv *)n->arg;
-       struct radix_tree_iter iter;
-       void **slot;
-
-       radix_tree_for_each_slot(slot, &priv->cq_table.tree, &iter, 0) {
-               struct mlx4_cq *cq = (struct mlx4_cq *)(*slot);
-
-               if (cq->irq == notify->irq)
-                       cq->irq_affinity_change = true;
-       }
-}
-
-static void mlx4_release_irq_notifier(struct kref *ref)
-{
-       struct mlx4_irq_notify *n = container_of(ref, struct mlx4_irq_notify,
-                                                notify.kref);
-       kfree(n);
-}
-
-static void mlx4_assign_irq_notifier(struct mlx4_priv *priv,
-                                    struct mlx4_dev *dev, int irq)
-{
-       struct mlx4_irq_notify *irq_notifier = NULL;
-       int err = 0;
-
-       irq_notifier = kzalloc(sizeof(*irq_notifier), GFP_KERNEL);
-       if (!irq_notifier) {
-               mlx4_warn(dev, "Failed to allocate irq notifier. irq %d\n",
-                         irq);
-               return;
-       }
-
-       irq_notifier->notify.irq = irq;
-       irq_notifier->notify.notify = mlx4_irq_notifier_notify;
-       irq_notifier->notify.release = mlx4_release_irq_notifier;
-       irq_notifier->arg = priv;
-       err = irq_set_affinity_notifier(irq, &irq_notifier->notify);
-       if (err) {
-               kfree(irq_notifier);
-               irq_notifier = NULL;
-               mlx4_warn(dev, "Failed to set irq notifier. irq %d\n", irq);
-       }
-}
-
-
 int mlx4_alloc_eq_table(struct mlx4_dev *dev)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
@@ -1409,8 +1353,6 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
                                continue;
                                /*we dont want to break here*/
                        }
-                       mlx4_assign_irq_notifier(priv, dev,
-                                                priv->eq_table.eq[vec].irq);
 
                        eq_set_ci(&priv->eq_table.eq[vec], 1);
                }
@@ -1427,6 +1369,14 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
 }
 EXPORT_SYMBOL(mlx4_assign_eq);
 
+int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+
+       return priv->eq_table.eq[vec].irq;
+}
+EXPORT_SYMBOL(mlx4_eq_get_irq);
+
 void mlx4_release_eq(struct mlx4_dev *dev, int vec)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
@@ -1438,9 +1388,6 @@ void mlx4_release_eq(struct mlx4_dev *dev, int vec)
                  Belonging to a legacy EQ*/
                mutex_lock(&priv->msix_ctl.pool_lock);
                if (priv->msix_ctl.pool_bm & 1ULL << i) {
-                       irq_set_affinity_notifier(
-                               priv->eq_table.eq[vec].irq,
-                               NULL);
                        free_irq(priv->eq_table.eq[vec].irq,
                                 &priv->eq_table.eq[vec]);
                        priv->msix_ctl.pool_bm &= ~(1ULL << i);
index 0e15295bedd671a0c3fc8c1ebbf0372052c489b7..d72a5a894fc6aef71315c098141f326d1b2dd55d 100644 (file)
@@ -126,6 +126,8 @@ enum {
 #define MAX_TX_RINGS                   (MLX4_EN_MAX_TX_RING_P_UP * \
                                         MLX4_EN_NUM_UP)
 
+#define MLX4_EN_DEFAULT_TX_WORK                256
+
 /* Target number of packets to coalesce with interrupt moderation */
 #define MLX4_EN_RX_COAL_TARGET 44
 #define MLX4_EN_RX_COAL_TIME   0x10
@@ -343,6 +345,7 @@ struct mlx4_en_cq {
 #define CQ_USER_PEND (MLX4_EN_CQ_STATE_POLL | MLX4_EN_CQ_STATE_POLL_YIELD)
        spinlock_t poll_lock; /* protects from LLS/napi conflicts */
 #endif  /* CONFIG_NET_RX_BUSY_POLL */
+       struct irq_desc *irq_desc;
 };
 
 struct mlx4_en_port_profile {
@@ -542,6 +545,7 @@ struct mlx4_en_priv {
        __be32 ctrl_flags;
        u32 flags;
        u8 num_tx_rings_p_up;
+       u32 tx_work_limit;
        u32 tx_ring_num;
        u32 rx_ring_num;
        u32 rx_skb_size;
index ba0401d4af502bc5ad83568b067b96dd5436243e..184c3615f4799bbda0adc0da0e265cacc6ae8bd6 100644 (file)
@@ -94,6 +94,11 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
        write_lock_irq(&table->lock);
        err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->key), mr);
        write_unlock_irq(&table->lock);
+       if (err) {
+               mlx5_core_warn(dev, "failed radix tree insert of mr 0x%x, %d\n",
+                              mlx5_base_mkey(mr->key), err);
+               mlx5_core_destroy_mkey(dev, mr);
+       }
 
        return err;
 }
@@ -104,12 +109,22 @@ int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr)
        struct mlx5_mr_table *table = &dev->priv.mr_table;
        struct mlx5_destroy_mkey_mbox_in in;
        struct mlx5_destroy_mkey_mbox_out out;
+       struct mlx5_core_mr *deleted_mr;
        unsigned long flags;
        int err;
 
        memset(&in, 0, sizeof(in));
        memset(&out, 0, sizeof(out));
 
+       write_lock_irqsave(&table->lock, flags);
+       deleted_mr = radix_tree_delete(&table->tree, mlx5_base_mkey(mr->key));
+       write_unlock_irqrestore(&table->lock, flags);
+       if (!deleted_mr) {
+               mlx5_core_warn(dev, "failed radix tree delete of mr 0x%x\n",
+                              mlx5_base_mkey(mr->key));
+               return -ENOENT;
+       }
+
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_MKEY);
        in.mkey = cpu_to_be32(mlx5_mkey_to_idx(mr->key));
        err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
@@ -119,10 +134,6 @@ int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr)
        if (out.hdr.status)
                return mlx5_cmd_status_to_err(&out.hdr);
 
-       write_lock_irqsave(&table->lock, flags);
-       radix_tree_delete(&table->tree, mlx5_base_mkey(mr->key));
-       write_unlock_irqrestore(&table->lock, flags);
-
        return err;
 }
 EXPORT_SYMBOL(mlx5_core_destroy_mkey);
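
The mkey hunks above reorder teardown so the key is removed from the driver's radix tree before the firmware command that destroys it, and they unwind the hardware object if the tree insert fails; both close windows in which a lookup could return a key the hardware no longer knows about. A toy sketch of the "unpublish first, then destroy" ordering, with an ordinary array standing in for the radix tree (all names are made up):

#include <stdio.h>
#include <string.h>

#define MAX_KEYS 8

static int table[MAX_KEYS];	/* 1 = published, 0 = absent */

static int publish(int key)     { table[key] = 1; return 0; }
static int unpublish(int key)   { int was = table[key]; table[key] = 0; return was; }
static void hw_destroy(int key) { printf("hw destroy key %d\n", key); }

/* Remove the key from the lookup structure before tearing down the
 * hardware object, so no lookup can return a half-destroyed key. */
static int destroy_key(int key)
{
	if (!unpublish(key))
		return -2;	/* -ENOENT: never published or already gone */
	hw_destroy(key);
	return 0;
}

int main(void)
{
	memset(table, 0, sizeof(table));
	publish(3);
	printf("destroy 3 -> %d\n", destroy_key(3));	/* 0 */
	printf("destroy 3 -> %d\n", destroy_key(3));	/* -2 */
	return 0;
}
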
index be425ad5e82487e94a91b7371466f7f93ab558eb..61623e9af57424b1298c7db02641d78d6154b751 100644 (file)
@@ -538,6 +538,7 @@ enum rtl_register_content {
        MagicPacket     = (1 << 5),     /* Wake up when receives a Magic Packet */
        LinkUp          = (1 << 4),     /* Wake up when the cable connection is re-established */
        Jumbo_En0       = (1 << 2),     /* 8168 only. Reserved in the 8168b */
+       Rdy_to_L23      = (1 << 1),     /* L23 Enable */
        Beacon_en       = (1 << 0),     /* 8168 only. Reserved in the 8168b */
 
        /* Config4 register */
@@ -4239,6 +4240,8 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
                RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
                break;
        case RTL_GIGA_MAC_VER_40:
+               RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
+               break;
        case RTL_GIGA_MAC_VER_41:
        case RTL_GIGA_MAC_VER_42:
        case RTL_GIGA_MAC_VER_43:
@@ -4897,6 +4900,21 @@ static void rtl_enable_clock_request(struct pci_dev *pdev)
                                 PCI_EXP_LNKCTL_CLKREQ_EN);
 }
 
+static void rtl_pcie_state_l2l3_enable(struct rtl8169_private *tp, bool enable)
+{
+       void __iomem *ioaddr = tp->mmio_addr;
+       u8 data;
+
+       data = RTL_R8(Config3);
+
+       if (enable)
+               data |= Rdy_to_L23;
+       else
+               data &= ~Rdy_to_L23;
+
+       RTL_W8(Config3, data);
+}
+
 #define R8168_CPCMD_QUIRK_MASK (\
        EnableBist | \
        Mac_dbgo_oe | \
@@ -5246,6 +5264,7 @@ static void rtl_hw_start_8411(struct rtl8169_private *tp)
        };
 
        rtl_hw_start_8168f(tp);
+       rtl_pcie_state_l2l3_enable(tp, false);
 
        rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
 
@@ -5284,6 +5303,8 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
 
        rtl_w1w0_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC);
        rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC);
+
+       rtl_pcie_state_l2l3_enable(tp, false);
 }
 
 static void rtl_hw_start_8168g_2(struct rtl8169_private *tp)
@@ -5536,6 +5557,8 @@ static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
        RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
 
        rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
+
+       rtl_pcie_state_l2l3_enable(tp, false);
 }
 
 static void rtl_hw_start_8105e_2(struct rtl8169_private *tp)
@@ -5571,6 +5594,8 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
        rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
        rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
        rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, ERIAR_EXGMAC);
+
+       rtl_pcie_state_l2l3_enable(tp, false);
 }
 
 static void rtl_hw_start_8106(struct rtl8169_private *tp)
@@ -5583,6 +5608,8 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)
        RTL_W32(MISC, (RTL_R32(MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN);
        RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
        RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
+
+       rtl_pcie_state_l2l3_enable(tp, false);
 }
 
 static void rtl_hw_start_8101(struct net_device *dev)
index b3e148ef568399cd7ca6c6b4d68f20ec886845c0..9d3748361a1e585b3ca4c06b8a761395d01f8979 100644 (file)
@@ -320,11 +320,8 @@ static void dwmac1000_set_eee_timer(void __iomem *ioaddr, int ls, int tw)
 
 static void dwmac1000_ctrl_ane(void __iomem *ioaddr, bool restart)
 {
-       u32 value;
-
-       value = readl(ioaddr + GMAC_AN_CTRL);
        /* auto negotiation enable and External Loopback enable */
-       value = GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_ELE;
+       u32 value = GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_ELE;
 
        if (restart)
                value |= GMAC_AN_CTRL_RAN;
index 7e6628a91514d595f443a71cdd515fc019494909..1e2bcf5f89e13837b03bec3da31b48113441eb47 100644 (file)
@@ -145,7 +145,7 @@ static void enh_desc_get_ext_status(void *data, struct stmmac_extra_stats *x,
                        x->rx_msg_type_delay_req++;
                else if (p->des4.erx.msg_type == RDES_EXT_DELAY_RESP)
                        x->rx_msg_type_delay_resp++;
-               else if (p->des4.erx.msg_type == RDES_EXT_DELAY_REQ)
+               else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_REQ)
                        x->rx_msg_type_pdelay_req++;
                else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_RESP)
                        x->rx_msg_type_pdelay_resp++;
index 1c24a8f368bd86a25835053de21ee6f96df78adb..fd411d6e19a2f4e4d37675f1d27bd011584b8fa0 100644 (file)
@@ -1083,6 +1083,24 @@ static struct vnet *vnet_find_or_create(const u64 *local_mac)
        return vp;
 }
 
+static void vnet_cleanup(void)
+{
+       struct vnet *vp;
+       struct net_device *dev;
+
+       mutex_lock(&vnet_list_mutex);
+       while (!list_empty(&vnet_list)) {
+               vp = list_first_entry(&vnet_list, struct vnet, list);
+               list_del(&vp->list);
+               dev = vp->dev;
+               /* vio_unregister_driver() should have cleaned up port_list */
+               BUG_ON(!list_empty(&vp->port_list));
+               unregister_netdev(dev);
+               free_netdev(dev);
+       }
+       mutex_unlock(&vnet_list_mutex);
+}
+
 static const char *local_mac_prop = "local-mac-address";
 
 static struct vnet *vnet_find_parent(struct mdesc_handle *hp,
@@ -1240,7 +1258,6 @@ static int vnet_port_remove(struct vio_dev *vdev)
 
                kfree(port);
 
-               unregister_netdev(vp->dev);
        }
        return 0;
 }
@@ -1268,6 +1285,7 @@ static int __init vnet_init(void)
 static void __exit vnet_exit(void)
 {
        vio_unregister_driver(&vnet_port_driver);
+       vnet_cleanup();
 }
 
 module_init(vnet_init);
index eb78203cd58e24a1eac5d197f99382589edfbe25..2aa57270838fb6e67ec6f54106cb21391e7fcd25 100644 (file)
@@ -291,7 +291,11 @@ static int         dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type);
 
 static int             dfx_rcv_init(DFX_board_t *bp, int get_buffers);
 static void            dfx_rcv_queue_process(DFX_board_t *bp);
+#ifdef DYNAMIC_BUFFERS
 static void            dfx_rcv_flush(DFX_board_t *bp);
+#else
+static inline void     dfx_rcv_flush(DFX_board_t *bp) {}
+#endif
 
 static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb,
                                     struct net_device *dev);
@@ -2849,7 +2853,7 @@ static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type)
  *     Align an sk_buff to a boundary power of 2
  *
  */
-
+#ifdef DYNAMIC_BUFFERS
 static void my_skb_align(struct sk_buff *skb, int n)
 {
        unsigned long x = (unsigned long)skb->data;
@@ -2859,7 +2863,7 @@ static void my_skb_align(struct sk_buff *skb, int n)
 
        skb_reserve(skb, v - x);
 }
-
+#endif
 
 /*
  * ================
@@ -3074,10 +3078,7 @@ static void dfx_rcv_queue_process(
                                        break;
                                        }
                                else {
-#ifndef DYNAMIC_BUFFERS
-                                       if (! rx_in_place)
-#endif
-                                       {
+                                       if (!rx_in_place) {
                                                /* Receive buffer allocated, pass receive packet up */
 
                                                skb_copy_to_linear_data(skb,
@@ -3453,10 +3454,6 @@ static void dfx_rcv_flush( DFX_board_t *bp )
                }
 
        }
-#else
-static inline void dfx_rcv_flush( DFX_board_t *bp )
-{
-}
 #endif /* DYNAMIC_BUFFERS */
 
 /*
index 6a999e6814a073a2a4bf33ee944d3c32c4a085eb..9408157a246c8e20cc9de5ec018bdbfc42d7cf34 100644 (file)
@@ -1323,15 +1323,15 @@ static bool dp83640_rxtstamp(struct phy_device *phydev,
 {
        struct dp83640_private *dp83640 = phydev->priv;
 
-       if (!dp83640->hwts_rx_en)
-               return false;
-
        if (is_status_frame(skb, type)) {
                decode_status_frame(dp83640, skb);
                kfree_skb(skb);
                return true;
        }
 
+       if (!dp83640->hwts_rx_en)
+               return false;
+
        SKB_PTP_TYPE(skb) = type;
        skb_queue_tail(&dp83640->rx_queue, skb);
        schedule_work(&dp83640->ts_work);
index 2e58aa54484c9ca4e3154e231a3af2766bc84933..4eaadcfcb0fe5ed2d5bd82a4632989916ade90e2 100644 (file)
@@ -187,6 +187,50 @@ struct mii_bus *of_mdio_find_bus(struct device_node *mdio_bus_np)
        return d ? to_mii_bus(d) : NULL;
 }
 EXPORT_SYMBOL(of_mdio_find_bus);
+
+/* Walk the list of subnodes of a mdio bus and look for a node that matches the
+ * phy's address with its 'reg' property. If found, set the of_node pointer for
+ * the phy. This allows auto-probed phy devices to be supplied with information
+ * passed in via DT.
+ */
+static void of_mdiobus_link_phydev(struct mii_bus *mdio,
+                                  struct phy_device *phydev)
+{
+       struct device *dev = &phydev->dev;
+       struct device_node *child;
+
+       if (dev->of_node || !mdio->dev.of_node)
+               return;
+
+       for_each_available_child_of_node(mdio->dev.of_node, child) {
+               int addr;
+               int ret;
+
+               ret = of_property_read_u32(child, "reg", &addr);
+               if (ret < 0) {
+                       dev_err(dev, "%s has invalid PHY address\n",
+                               child->full_name);
+                       continue;
+               }
+
+               /* A PHY must have a reg property in the range [0-31] */
+               if (addr >= PHY_MAX_ADDR) {
+                       dev_err(dev, "%s PHY address %i is too large\n",
+                               child->full_name, addr);
+                       continue;
+               }
+
+               if (addr == phydev->addr) {
+                       dev->of_node = child;
+                       return;
+               }
+       }
+}
+#else /* !IS_ENABLED(CONFIG_OF_MDIO) */
+static inline void of_mdiobus_link_phydev(struct mii_bus *mdio,
+                                         struct phy_device *phydev)
+{
+}
 #endif
 
 /**
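
The new #else branch above supplies a no-op inline stub so callers of of_mdiobus_link_phydev() build unchanged when CONFIG_OF_MDIO is disabled, a common kernel pattern for optional subsystems. A standalone sketch of the same idea (the config macro and function are placeholders):

#include <stdio.h>

/* Toggle this to mimic CONFIG_OF_MDIO being enabled or disabled. */
#define HAVE_FEATURE 0

#if HAVE_FEATURE
static void link_to_devicetree(const char *name)
{
	printf("linking %s to its devicetree node\n", name);
}
#else
/* No-op stub: callers compile and run the same with the feature off. */
static inline void link_to_devicetree(const char *name) { (void)name; }
#endif

int main(void)
{
	link_to_devicetree("eth0 phy");	/* does nothing when HAVE_FEATURE is 0 */
	return 0;
}
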
index 91d6c1272fcf0ae4a655fa74ca6b91fb578a6108..d5b77ef3a2100c3ce42ad75f4e1c9fe981f046e9 100644 (file)
@@ -539,7 +539,7 @@ static int get_filter(void __user *arg, struct sock_filter **p)
 {
        struct sock_fprog uprog;
        struct sock_filter *code = NULL;
-       int len, err;
+       int len;
 
        if (copy_from_user(&uprog, arg, sizeof(uprog)))
                return -EFAULT;
@@ -554,12 +554,6 @@ static int get_filter(void __user *arg, struct sock_filter **p)
        if (IS_ERR(code))
                return PTR_ERR(code);
 
-       err = sk_chk_filter(code, uprog.len);
-       if (err) {
-               kfree(code);
-               return err;
-       }
-
        *p = code;
        return uprog.len;
 }
@@ -763,10 +757,15 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                        };
 
                        ppp_lock(ppp);
-                       if (ppp->pass_filter)
+                       if (ppp->pass_filter) {
                                sk_unattached_filter_destroy(ppp->pass_filter);
-                       err = sk_unattached_filter_create(&ppp->pass_filter,
-                                                         &fprog);
+                               ppp->pass_filter = NULL;
+                       }
+                       if (fprog.filter != NULL)
+                               err = sk_unattached_filter_create(&ppp->pass_filter,
+                                                                 &fprog);
+                       else
+                               err = 0;
                        kfree(code);
                        ppp_unlock(ppp);
                }
@@ -784,10 +783,15 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                        };
 
                        ppp_lock(ppp);
-                       if (ppp->active_filter)
+                       if (ppp->active_filter) {
                                sk_unattached_filter_destroy(ppp->active_filter);
-                       err = sk_unattached_filter_create(&ppp->active_filter,
-                                                         &fprog);
+                               ppp->active_filter = NULL;
+                       }
+                       if (fprog.filter != NULL)
+                               err = sk_unattached_filter_create(&ppp->active_filter,
+                                                                 &fprog);
+                       else
+                               err = 0;
                        kfree(code);
                        ppp_unlock(ppp);
                }
index 2ea7efd118577169f52c8b353148e53a8a00b8b1..6c9c16d76935f5db5db13bdb54b0e6530b15c8ed 100644 (file)
@@ -675,7 +675,7 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
                po->chan.hdrlen = (sizeof(struct pppoe_hdr) +
                                   dev->hard_header_len);
 
-               po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr);
+               po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr) - 2;
                po->chan.private = sk;
                po->chan.ops = &pppoe_chan_ops;
 
index a3a05869309df6a1ac34cdb00c6ff4d031dc921b..a4272ed62da865170cd9de7622e7c48875ed9f5c 100644 (file)
@@ -258,10 +258,8 @@ struct hso_serial {
         * so as not to drop characters on the floor.
         */
        int  curr_rx_urb_idx;
-       u16  curr_rx_urb_offset;
        u8   rx_urb_filled[MAX_RX_URBS];
        struct tasklet_struct unthrottle_tasklet;
-       struct work_struct    retry_unthrottle_workqueue;
 };
 
 struct hso_device {
@@ -1252,14 +1250,6 @@ static   void hso_unthrottle(struct tty_struct *tty)
        tasklet_hi_schedule(&serial->unthrottle_tasklet);
 }
 
-static void hso_unthrottle_workfunc(struct work_struct *work)
-{
-       struct hso_serial *serial =
-           container_of(work, struct hso_serial,
-                        retry_unthrottle_workqueue);
-       hso_unthrottle_tasklet(serial);
-}
-
 /* open the requested serial port */
 static int hso_serial_open(struct tty_struct *tty, struct file *filp)
 {
@@ -1295,8 +1285,6 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
                tasklet_init(&serial->unthrottle_tasklet,
                             (void (*)(unsigned long))hso_unthrottle_tasklet,
                             (unsigned long)serial);
-               INIT_WORK(&serial->retry_unthrottle_workqueue,
-                         hso_unthrottle_workfunc);
                result = hso_start_serial_device(serial->parent, GFP_KERNEL);
                if (result) {
                        hso_stop_serial_device(serial->parent);
@@ -1345,7 +1333,6 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
                if (!usb_gone)
                        hso_stop_serial_device(serial->parent);
                tasklet_kill(&serial->unthrottle_tasklet);
-               cancel_work_sync(&serial->retry_unthrottle_workqueue);
        }
 
        if (!usb_gone)
@@ -2013,8 +2000,7 @@ static void ctrl_callback(struct urb *urb)
 static int put_rxbuf_data(struct urb *urb, struct hso_serial *serial)
 {
        struct tty_struct *tty;
-       int write_length_remaining = 0;
-       int curr_write_len;
+       int count;
 
        /* Sanity check */
        if (urb == NULL || serial == NULL) {
@@ -2024,29 +2010,28 @@ static int put_rxbuf_data(struct urb *urb, struct hso_serial *serial)
 
        tty = tty_port_tty_get(&serial->port);
 
+       if (tty && test_bit(TTY_THROTTLED, &tty->flags)) {
+               tty_kref_put(tty);
+               return -1;
+       }
+
        /* Push data to tty */
-       write_length_remaining = urb->actual_length -
-               serial->curr_rx_urb_offset;
        D1("data to push to tty");
-       while (write_length_remaining) {
-               if (tty && test_bit(TTY_THROTTLED, &tty->flags)) {
-                       tty_kref_put(tty);
-                       return -1;
-               }
-               curr_write_len = tty_insert_flip_string(&serial->port,
-                       urb->transfer_buffer + serial->curr_rx_urb_offset,
-                       write_length_remaining);
-               serial->curr_rx_urb_offset += curr_write_len;
-               write_length_remaining -= curr_write_len;
+       count = tty_buffer_request_room(&serial->port, urb->actual_length);
+       if (count >= urb->actual_length) {
+               tty_insert_flip_string(&serial->port, urb->transfer_buffer,
+                                      urb->actual_length);
                tty_flip_buffer_push(&serial->port);
+       } else {
+               dev_warn(&serial->parent->usb->dev,
+                        "dropping data, %d bytes lost\n", urb->actual_length);
        }
+
        tty_kref_put(tty);
 
-       if (write_length_remaining == 0) {
-               serial->curr_rx_urb_offset = 0;
-               serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 0;
-       }
-       return write_length_remaining;
+       serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 0;
+
+       return 0;
 }
 
 
@@ -2217,7 +2202,6 @@ static int hso_stop_serial_device(struct hso_device *hso_dev)
                }
        }
        serial->curr_rx_urb_idx = 0;
-       serial->curr_rx_urb_offset = 0;
 
        if (serial->tx_urb)
                usb_kill_urb(serial->tx_urb);
index 5d95a13dbe2aa5cb44f6f6ff9f8ab26b47d0c418..735f7dadb9a0740dea86ed58d55df1ae58edf9ef 100644 (file)
@@ -194,6 +194,9 @@ static const struct usb_device_id huawei_cdc_ncm_devs[] = {
        { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x76),
          .driver_info = (unsigned long)&huawei_cdc_ncm_info,
        },
+       { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x03, 0x16),
+         .driver_info = (unsigned long)&huawei_cdc_ncm_info,
+       },
 
        /* Terminating entry */
        {
index cf62d7e8329f11858859257c170b605c6b9a0940..22756db53dcacc3138fd000cad8dbda968948fb8 100644 (file)
@@ -667,6 +667,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x05c6, 0x9084, 4)},
        {QMI_FIXED_INTF(0x05c6, 0x920d, 0)},
        {QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
+       {QMI_FIXED_INTF(0x0846, 0x68a2, 8)},
        {QMI_FIXED_INTF(0x12d1, 0x140c, 1)},    /* Huawei E173 */
        {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)},    /* Huawei E1820 */
        {QMI_FIXED_INTF(0x16d8, 0x6003, 0)},    /* CMOTech 6003 */
@@ -741,6 +742,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x19d2, 0x1424, 2)},
        {QMI_FIXED_INTF(0x19d2, 0x1425, 2)},
        {QMI_FIXED_INTF(0x19d2, 0x1426, 2)},    /* ZTE MF91 */
+       {QMI_FIXED_INTF(0x19d2, 0x1428, 2)},    /* Telewell TW-LTE 4G v2 */
        {QMI_FIXED_INTF(0x19d2, 0x2002, 4)},    /* ZTE (Vodafone) K3765-Z */
        {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)},    /* Sierra Wireless MC7700 */
        {QMI_FIXED_INTF(0x114f, 0x68a2, 8)},    /* Sierra Wireless MC7750 */
@@ -756,6 +758,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x1199, 0x9054, 8)},    /* Sierra Wireless Modem */
        {QMI_FIXED_INTF(0x1199, 0x9055, 8)},    /* Netgear AirCard 341U */
        {QMI_FIXED_INTF(0x1199, 0x9056, 8)},    /* Sierra Wireless Modem */
+       {QMI_FIXED_INTF(0x1199, 0x9057, 8)},
        {QMI_FIXED_INTF(0x1199, 0x9061, 8)},    /* Sierra Wireless Modem */
        {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)},    /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
        {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)},    /* Alcatel L800MA */
index 25431965a625a63d99fd3fa6d3167183ea45606a..7bad2d316637ab1ac8f7d83d197facbaf3e4495e 100644 (file)
@@ -1359,7 +1359,7 @@ static void r8152_csum_workaround(struct r8152 *tp, struct sk_buff *skb,
                struct sk_buff_head seg_list;
                struct sk_buff *segs, *nskb;
 
-               features &= ~(NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO);
+               features &= ~(NETIF_F_SG | NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
                segs = skb_gso_segment(skb, features);
                if (IS_ERR(segs) || !segs)
                        goto drop;
@@ -3204,8 +3204,13 @@ static void rtl8152_get_ethtool_stats(struct net_device *dev,
        struct r8152 *tp = netdev_priv(dev);
        struct tally_counter tally;
 
+       if (usb_autopm_get_interface(tp->intf) < 0)
+               return;
+
        generic_ocp_read(tp, PLA_TALLYCNT, sizeof(tally), &tally, MCU_TYPE_PLA);
 
+       usb_autopm_put_interface(tp->intf);
+
        data[0] = le64_to_cpu(tally.tx_packets);
        data[1] = le64_to_cpu(tally.rx_packets);
        data[2] = le64_to_cpu(tally.tx_errors);
index 424db65e43962545b1746df63deb23833bf0c18b..d07bf4cb893f878c2a2a7f40e5c5691304fbdbf5 100644 (file)
@@ -1714,6 +1714,18 @@ static int smsc95xx_resume(struct usb_interface *intf)
        return ret;
 }
 
+static int smsc95xx_reset_resume(struct usb_interface *intf)
+{
+       struct usbnet *dev = usb_get_intfdata(intf);
+       int ret;
+
+       ret = smsc95xx_reset(dev);
+       if (ret < 0)
+               return ret;
+
+       return smsc95xx_resume(intf);
+}
+
 static void smsc95xx_rx_csum_offload(struct sk_buff *skb)
 {
        skb->csum = *(u16 *)(skb_tail_pointer(skb) - 2);
@@ -2004,7 +2016,7 @@ static struct usb_driver smsc95xx_driver = {
        .probe          = usbnet_probe,
        .suspend        = smsc95xx_suspend,
        .resume         = smsc95xx_resume,
-       .reset_resume   = smsc95xx_resume,
+       .reset_resume   = smsc95xx_reset_resume,
        .disconnect     = usbnet_disconnect,
        .disable_hub_initiated_lpm = 1,
        .supports_autosuspend = 1,
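
smsc95xx now points .reset_resume at a handler that redoes the chip initialisation before running the ordinary resume path, since a reset-resume means the hardware lost its state. A minimal sketch of that split, assuming hypothetical my_hw_reset()/my_resume() helpers:

#include <linux/usb.h>

/* Hypothetical chip re-initialisation; must run before the normal resume
 * work because the device was reset and its registers are back to defaults.
 */
static int my_hw_reset(void *priv)
{
	return 0;
}

/* Hypothetical ordinary resume handler. */
static int my_resume(struct usb_interface *intf)
{
	return 0;
}

static int my_reset_resume(struct usb_interface *intf)
{
	int ret = my_hw_reset(usb_get_intfdata(intf));

	if (ret < 0)
		return ret;
	return my_resume(intf);
}
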
index 93ace042d0aa71b007ec80ab638a5cdbcaa790dd..1f041271f7fec8ec2346808fc0c2cb81ee75570e 100644 (file)
@@ -2363,7 +2363,7 @@ static char *type_strings[] = {
        "FarSync TE1"
 };
 
-static void
+static int
 fst_init_card(struct fst_card_info *card)
 {
        int i;
@@ -2374,24 +2374,21 @@ fst_init_card(struct fst_card_info *card)
         * we'll have to revise it in some way then.
         */
        for (i = 0; i < card->nports; i++) {
-                err = register_hdlc_device(card->ports[i].dev);
-                if (err < 0) {
-                       int j;
+               err = register_hdlc_device(card->ports[i].dev);
+               if (err < 0) {
                        pr_err("Cannot register HDLC device for port %d (errno %d)\n",
-                              i, -err);
-                       for (j = i; j < card->nports; j++) {
-                               free_netdev(card->ports[j].dev);
-                               card->ports[j].dev = NULL;
-                       }
-                        card->nports = i;
-                        break;
-                }
+                               i, -err);
+                       while (i--)
+                               unregister_hdlc_device(card->ports[i].dev);
+                       return err;
+               }
        }
 
        pr_info("%s-%s: %s IRQ%d, %d ports\n",
                port_to_dev(&card->ports[0])->name,
                port_to_dev(&card->ports[card->nports - 1])->name,
                type_strings[card->type], card->irq, card->nports);
+       return 0;
 }
 
 static const struct net_device_ops fst_ops = {
@@ -2447,15 +2444,12 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        /* Try to enable the device */
        if ((err = pci_enable_device(pdev)) != 0) {
                pr_err("Failed to enable card. Err %d\n", -err);
-               kfree(card);
-               return err;
+               goto enable_fail;
        }
 
        if ((err = pci_request_regions(pdev, "FarSync")) !=0) {
                pr_err("Failed to allocate regions. Err %d\n", -err);
-               pci_disable_device(pdev);
-               kfree(card);
-               return err;
+               goto regions_fail;
        }
 
        /* Get virtual addresses of memory regions */
@@ -2464,30 +2458,21 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        card->phys_ctlmem = pci_resource_start(pdev, 3);
        if ((card->mem = ioremap(card->phys_mem, FST_MEMSIZE)) == NULL) {
                pr_err("Physical memory remap failed\n");
-               pci_release_regions(pdev);
-               pci_disable_device(pdev);
-               kfree(card);
-               return -ENODEV;
+               err = -ENODEV;
+               goto ioremap_physmem_fail;
        }
        if ((card->ctlmem = ioremap(card->phys_ctlmem, 0x10)) == NULL) {
                pr_err("Control memory remap failed\n");
-               pci_release_regions(pdev);
-               pci_disable_device(pdev);
-               iounmap(card->mem);
-               kfree(card);
-               return -ENODEV;
+               err = -ENODEV;
+               goto ioremap_ctlmem_fail;
        }
        dbg(DBG_PCI, "kernel mem %p, ctlmem %p\n", card->mem, card->ctlmem);
 
        /* Register the interrupt handler */
        if (request_irq(pdev->irq, fst_intr, IRQF_SHARED, FST_DEV_NAME, card)) {
                pr_err("Unable to register interrupt %d\n", card->irq);
-               pci_release_regions(pdev);
-               pci_disable_device(pdev);
-               iounmap(card->ctlmem);
-               iounmap(card->mem);
-               kfree(card);
-               return -ENODEV;
+               err = -ENODEV;
+               goto irq_fail;
        }
 
        /* Record info we need */
@@ -2513,13 +2498,8 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                        while (i--)
                                free_netdev(card->ports[i].dev);
                        pr_err("FarSync: out of memory\n");
-                        free_irq(card->irq, card);
-                        pci_release_regions(pdev);
-                        pci_disable_device(pdev);
-                        iounmap(card->ctlmem);
-                        iounmap(card->mem);
-                        kfree(card);
-                        return -ENODEV;
+                       err = -ENOMEM;
+                       goto hdlcdev_fail;
                }
                card->ports[i].dev    = dev;
                 card->ports[i].card   = card;
@@ -2565,9 +2545,16 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        pci_set_drvdata(pdev, card);
 
        /* Remainder of card setup */
+       if (no_of_cards_added >= FST_MAX_CARDS) {
+               pr_err("FarSync: too many cards\n");
+               err = -ENOMEM;
+               goto card_array_fail;
+       }
        fst_card_array[no_of_cards_added] = card;
        card->card_no = no_of_cards_added++;    /* Record instance and bump it */
-       fst_init_card(card);
+       err = fst_init_card(card);
+       if (err)
+               goto init_card_fail;
        if (card->family == FST_FAMILY_TXU) {
                /*
                 * Allocate a dma buffer for transmit and receives
@@ -2577,29 +2564,46 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                                         &card->rx_dma_handle_card);
                if (card->rx_dma_handle_host == NULL) {
                        pr_err("Could not allocate rx dma buffer\n");
-                       fst_disable_intr(card);
-                       pci_release_regions(pdev);
-                       pci_disable_device(pdev);
-                       iounmap(card->ctlmem);
-                       iounmap(card->mem);
-                       kfree(card);
-                       return -ENOMEM;
+                       err = -ENOMEM;
+                       goto rx_dma_fail;
                }
                card->tx_dma_handle_host =
                    pci_alloc_consistent(card->device, FST_MAX_MTU,
                                         &card->tx_dma_handle_card);
                if (card->tx_dma_handle_host == NULL) {
                        pr_err("Could not allocate tx dma buffer\n");
-                       fst_disable_intr(card);
-                       pci_release_regions(pdev);
-                       pci_disable_device(pdev);
-                       iounmap(card->ctlmem);
-                       iounmap(card->mem);
-                       kfree(card);
-                       return -ENOMEM;
+                       err = -ENOMEM;
+                       goto tx_dma_fail;
                }
        }
        return 0;               /* Success */
+
+tx_dma_fail:
+       pci_free_consistent(card->device, FST_MAX_MTU,
+                           card->rx_dma_handle_host,
+                           card->rx_dma_handle_card);
+rx_dma_fail:
+       fst_disable_intr(card);
+       for (i = 0 ; i < card->nports ; i++)
+               unregister_hdlc_device(card->ports[i].dev);
+init_card_fail:
+       fst_card_array[card->card_no] = NULL;
+card_array_fail:
+       for (i = 0 ; i < card->nports ; i++)
+               free_netdev(card->ports[i].dev);
+hdlcdev_fail:
+       free_irq(card->irq, card);
+irq_fail:
+       iounmap(card->ctlmem);
+ioremap_ctlmem_fail:
+       iounmap(card->mem);
+ioremap_physmem_fail:
+       pci_release_regions(pdev);
+regions_fail:
+       pci_disable_device(pdev);
+enable_fail:
+       kfree(card);
+       return err;
 }
 
 /*
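
The fst_add_one() rework above replaces the copy-pasted cleanup blocks with a single chain of labels, unwound in reverse order of acquisition. A generic sketch of that idiom (the probe body and label names are hypothetical, not FarSync code):

#include <linux/pci.h>

/* Sketch of the unwind idiom: each failure jumps to the label that undoes
 * everything acquired so far, in reverse order of acquisition.
 */
static int example_probe(struct pci_dev *pdev)
{
	void __iomem *mem;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		goto enable_fail;

	err = pci_request_regions(pdev, "example");
	if (err)
		goto regions_fail;

	mem = pci_iomap(pdev, 0, 0);
	if (!mem) {
		err = -ENODEV;
		goto iomap_fail;
	}

	/* ... further setup would continue here ... */
	return 0;

iomap_fail:
	pci_release_regions(pdev);
regions_fail:
	pci_disable_device(pdev);
enable_fail:
	return err;
}
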
index 5895f19786919f6cfb36b97273a95bc1462d1c10..fa9fdfa128c1e6b732c74907fb43504c8ec2ad4c 100644 (file)
@@ -122,8 +122,12 @@ static int x25_asy_change_mtu(struct net_device *dev, int newmtu)
 {
        struct x25_asy *sl = netdev_priv(dev);
        unsigned char *xbuff, *rbuff;
-       int len = 2 * newmtu;
+       int len;
 
+       if (newmtu > 65534)
+               return -EINVAL;
+
+       len = 2 * newmtu;
        xbuff = kmalloc(len + 4, GFP_ATOMIC);
        rbuff = kmalloc(len + 4, GFP_ATOMIC);
 
index 82017f56e6613a484b224a2d9d41f8aa796f16c9..e6c56c5bb0f608c3c7377b06cb9d3ff491caee42 100644 (file)
@@ -795,7 +795,11 @@ int ath10k_core_start(struct ath10k *ar)
        if (status)
                goto err_htc_stop;
 
-       ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1;
+       if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
+               ar->free_vdev_map = (1 << TARGET_10X_NUM_VDEVS) - 1;
+       else
+               ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1;
+
        INIT_LIST_HEAD(&ar->arvifs);
 
        if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
index 6c102b1312ff955db686022aa76e1a7ccc6e42b3..eebc860c36550a4ae65bb3910d799a86c0e8231a 100644 (file)
@@ -312,7 +312,6 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
        int msdu_len, msdu_chaining = 0;
        struct sk_buff *msdu;
        struct htt_rx_desc *rx_desc;
-       bool corrupted = false;
 
        lockdep_assert_held(&htt->rx_ring.lock);
 
@@ -439,9 +438,6 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
                last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
                                RX_MSDU_END_INFO0_LAST_MSDU;
 
-               if (msdu_chaining && !last_msdu)
-                       corrupted = true;
-
                if (last_msdu) {
                        msdu->next = NULL;
                        break;
@@ -456,20 +452,6 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
        if (*head_msdu == NULL)
                msdu_chaining = -1;
 
-       /*
-        * Apparently FW sometimes reports weird chained MSDU sequences with
-        * more than one rx descriptor. This seems like a bug but needs more
-        * analyzing. For the time being fix it by dropping such sequences to
-        * avoid blowing up the host system.
-        */
-       if (corrupted) {
-               ath10k_warn("failed to pop chained msdus, dropping\n");
-               ath10k_htt_rx_free_msdu_chain(*head_msdu);
-               *head_msdu = NULL;
-               *tail_msdu = NULL;
-               msdu_chaining = -EINVAL;
-       }
-
        /*
         * Don't refill the ring yet.
         *
index 6db51a666f619abedaee11ac4822b917b24b4f3c..d06fcb05adf2517a292ab727467e69d0abf28259 100644 (file)
@@ -1184,8 +1184,6 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo)
        bus->bus_priv.usb = bus_pub;
        dev_set_drvdata(dev, bus);
        bus->ops = &brcmf_usb_bus_ops;
-       bus->chip = bus_pub->devid;
-       bus->chiprev = bus_pub->chiprev;
        bus->proto_type = BRCMF_PROTO_BCDC;
        bus->always_use_fws_queue = true;
 
@@ -1194,6 +1192,9 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo)
                if (ret)
                        goto fail;
        }
+       bus->chip = bus_pub->devid;
+       bus->chiprev = bus_pub->chiprev;
+
        /* request firmware here */
        brcmf_fw_get_firmwares(dev, 0, brcmf_usb_get_fwname(devinfo), NULL,
                               brcmf_usb_probe_phase2);
index ed50de6362ed1d5dcd56b45243ff0b140dcbafe5..6dc5dd3ced44723943934f114c6fde78c98a565f 100644 (file)
@@ -1068,13 +1068,6 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
        /* recalculate basic rates */
        iwl_calc_basic_rates(priv, ctx);
 
-       /*
-        * force CTS-to-self frames protection if RTS-CTS is not preferred
-        * one aggregation protection method
-        */
-       if (!priv->hw_params.use_rts_for_aggregation)
-               ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
-
        if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) ||
            !(ctx->staging.flags & RXON_FLG_BAND_24G_MSK))
                ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
@@ -1480,11 +1473,6 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
        else
                ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
 
-       if (bss_conf->use_cts_prot)
-               ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
-       else
-               ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
-
        memcpy(ctx->staging.bssid_addr, bss_conf->bssid, ETH_ALEN);
 
        if (vif->type == NL80211_IFTYPE_AP ||
index 0aa7c0085c9fd04554b1a3314a2212638f920a78..b1a33322b9bac9534e02a65c8e431d387f509542 100644 (file)
@@ -88,6 +88,7 @@
  *     P2P client interfaces simultaneously if they are in different bindings.
  * @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_SCM: support power save on BSS station and
  *     P2P client interfaces simultaneously if they are in same bindings.
+ * @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: General support for uAPSD
  * @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save
  * @IWL_UCODE_TLV_FLAGS_BCAST_FILTERING: uCode supports broadcast filtering.
  * @IWL_UCODE_TLV_FLAGS_GO_UAPSD: AP/GO interfaces support uAPSD clients
index 8b530277763258551cf09292298f8f14be074174..725ba49576bf640a41194cc4539176f70118c9d0 100644 (file)
@@ -667,10 +667,9 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
        if (vif->bss_conf.qos)
                cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA);
 
-       if (vif->bss_conf.use_cts_prot) {
+       if (vif->bss_conf.use_cts_prot)
                cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT);
-               cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_SELF_CTS_EN);
-       }
+
        IWL_DEBUG_RATE(mvm, "use_cts_prot %d, ht_operation_mode %d\n",
                       vif->bss_conf.use_cts_prot,
                       vif->bss_conf.ht_operation_mode);
index 7215f59801863d3b7d72398de8c96c7b73c3902b..9bfb90680cdcb2e6d8d716a04798322c4c9b2174 100644 (file)
@@ -303,6 +303,13 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
                hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
        }
 
+       if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT &&
+           !iwlwifi_mod_params.uapsd_disable) {
+               hw->flags |= IEEE80211_HW_SUPPORTS_UAPSD;
+               hw->uapsd_queues = IWL_UAPSD_AC_INFO;
+               hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
+       }
+
        hw->sta_data_size = sizeof(struct iwl_mvm_sta);
        hw->vif_data_size = sizeof(struct iwl_mvm_vif);
        hw->chanctx_data_size = sizeof(u16);
@@ -1159,8 +1166,12 @@ static void iwl_mvm_bcast_filter_iterator(void *_data, u8 *mac,
 
        bcast_mac = &cmd->macs[mvmvif->id];
 
-       /* enable filtering only for associated stations */
-       if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc)
+       /*
+        * enable filtering only for associated stations, but not for P2P
+        * Clients
+        */
+       if (vif->type != NL80211_IFTYPE_STATION || vif->p2p ||
+           !vif->bss_conf.assoc)
                return;
 
        bcast_mac->default_discard = 1;
@@ -1237,10 +1248,6 @@ static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm,
        if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING))
                return 0;
 
-       /* bcast filtering isn't supported for P2P client */
-       if (vif->p2p)
-               return 0;
-
        if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
                return 0;
 
index 4b6c7d4bd199ef4dbc20defd15f966e59545d6e3..eac2b424f6a06447a79ba20e9447d4d32fbe5cd6 100644 (file)
@@ -588,9 +588,7 @@ static void iwl_build_scan_cmd(struct iwl_mvm *mvm,
                               struct iwl_scan_offload_cmd *scan,
                               struct iwl_mvm_scan_params *params)
 {
-       scan->channel_count =
-               mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels +
-               mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
+       scan->channel_count = req->n_channels;
        scan->quiet_time = cpu_to_le16(IWL_ACTIVE_QUIET_TIME);
        scan->quiet_plcp_th = cpu_to_le16(IWL_PLCP_QUIET_THRESH);
        scan->good_CRC_th = IWL_GOOD_CRC_TH_DEFAULT;
@@ -669,61 +667,37 @@ static void iwl_build_channel_cfg(struct iwl_mvm *mvm,
                                  struct cfg80211_sched_scan_request *req,
                                  struct iwl_scan_channel_cfg *channels,
                                  enum ieee80211_band band,
-                                 int *head, int *tail,
+                                 int *head,
                                  u32 ssid_bitmap,
                                  struct iwl_mvm_scan_params *params)
 {
-       struct ieee80211_supported_band *s_band;
-       int n_channels = req->n_channels;
-       int i, j, index = 0;
-       bool partial;
+       int i, index = 0;
 
-       /*
-        * We have to configure all supported channels, even if we don't want to
-        * scan on them, but we have to send channels in the order that we want
-        * to scan. So add requested channels to head of the list and others to
-        * the end.
-       */
-       s_band = &mvm->nvm_data->bands[band];
-
-       for (i = 0; i < s_band->n_channels && *head <= *tail; i++) {
-               partial = false;
-               for (j = 0; j < n_channels; j++)
-                       if (s_band->channels[i].center_freq ==
-                                               req->channels[j]->center_freq) {
-                               index = *head;
-                               (*head)++;
-                               /*
-                                * Channels that came with the request will be
-                                * in partial scan .
-                                */
-                               partial = true;
-                               break;
-                       }
-               if (!partial) {
-                       index = *tail;
-                       (*tail)--;
-               }
-               channels->channel_number[index] =
-                       cpu_to_le16(ieee80211_frequency_to_channel(
-                                       s_band->channels[i].center_freq));
+       for (i = 0; i < req->n_channels; i++) {
+               struct ieee80211_channel *chan = req->channels[i];
+
+               if (chan->band != band)
+                       continue;
+
+               index = *head;
+               (*head)++;
+
+               channels->channel_number[index] = cpu_to_le16(chan->hw_value);
                channels->dwell_time[index][0] = params->dwell[band].active;
                channels->dwell_time[index][1] = params->dwell[band].passive;
 
                channels->iter_count[index] = cpu_to_le16(1);
                channels->iter_interval[index] = 0;
 
-               if (!(s_band->channels[i].flags & IEEE80211_CHAN_NO_IR))
+               if (!(chan->flags & IEEE80211_CHAN_NO_IR))
                        channels->type[index] |=
                                cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_ACTIVE);
 
                channels->type[index] |=
-                               cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_FULL);
-               if (partial)
-                       channels->type[index] |=
-                               cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_PARTIAL);
+                               cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_FULL |
+                                           IWL_SCAN_OFFLOAD_CHANNEL_PARTIAL);
 
-               if (s_band->channels[i].flags & IEEE80211_CHAN_NO_HT40)
+               if (chan->flags & IEEE80211_CHAN_NO_HT40)
                        channels->type[index] |=
                                cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_NARROW);
 
@@ -740,7 +714,6 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
        int band_2ghz = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels;
        int band_5ghz = mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
        int head = 0;
-       int tail = band_2ghz + band_5ghz - 1;
        u32 ssid_bitmap;
        int cmd_len;
        int ret;
@@ -772,7 +745,7 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
                                              &scan_cfg->scan_cmd.tx_cmd[0],
                                              scan_cfg->data);
                iwl_build_channel_cfg(mvm, req, &scan_cfg->channel_cfg,
-                                     IEEE80211_BAND_2GHZ, &head, &tail,
+                                     IEEE80211_BAND_2GHZ, &head,
                                      ssid_bitmap, &params);
        }
        if (band_5ghz) {
@@ -782,7 +755,7 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
                                              scan_cfg->data +
                                                SCAN_OFFLOAD_PROBE_REQ_SIZE);
                iwl_build_channel_cfg(mvm, req, &scan_cfg->channel_cfg,
-                                     IEEE80211_BAND_5GHZ, &head, &tail,
+                                     IEEE80211_BAND_5GHZ, &head,
                                      ssid_bitmap, &params);
        }
 
index 7091a18d5a72f9880f7a47f7a949fefd9de3b9f1..98950e45c7b01e2babd58cf674feff2054e53c3a 100644 (file)
@@ -367,6 +367,7 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
        {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x5412, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x095A, 0x5510, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)},
@@ -380,7 +381,7 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
        {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x095A, 0x9200, iwl7265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x095B, 0x9200, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)},
index 5b32106182f81c11fbc2bd985166dad198f341b1..fe0f66f735076d68aa7cef6e19ab791d34411b4a 100644 (file)
@@ -185,6 +185,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
        skb_reserve(skb_aggr, headroom + sizeof(struct txpd));
        tx_info_aggr =  MWIFIEX_SKB_TXCB(skb_aggr);
 
+       memset(tx_info_aggr, 0, sizeof(*tx_info_aggr));
        tx_info_aggr->bss_type = tx_info_src->bss_type;
        tx_info_aggr->bss_num = tx_info_src->bss_num;
 
index e95dec91a561e1172289dca0d6bbfb35b38add56..b511613bba2d8608f057fd15223c2af33c3be962 100644 (file)
@@ -220,6 +220,7 @@ mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
        }
 
        tx_info = MWIFIEX_SKB_TXCB(skb);
+       memset(tx_info, 0, sizeof(*tx_info));
        tx_info->bss_num = priv->bss_num;
        tx_info->bss_type = priv->bss_type;
        tx_info->pkt_len = pkt_len;
index 8dee6c86f4f1dc91e65978b6f7443ac9f00c2118..c161141f6c39ec8c2bcf5d8e9a2a2951c9f94a71 100644 (file)
@@ -453,6 +453,7 @@ int mwifiex_process_event(struct mwifiex_adapter *adapter)
 
        if (skb) {
                rx_info = MWIFIEX_SKB_RXCB(skb);
+               memset(rx_info, 0, sizeof(*rx_info));
                rx_info->bss_num = priv->bss_num;
                rx_info->bss_type = priv->bss_type;
        }
index cbabc12fbda390d063218375eb2b4cadc3911b8f..e91cd0fa5ca81e3585e8173a0fb6a1789cfdaca7 100644 (file)
@@ -645,6 +645,7 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
        }
 
        tx_info = MWIFIEX_SKB_TXCB(skb);
+       memset(tx_info, 0, sizeof(*tx_info));
        tx_info->bss_num = priv->bss_num;
        tx_info->bss_type = priv->bss_type;
        tx_info->pkt_len = skb->len;
index 5fce7e78a36e773c28875a7636a666b50ced36d5..70eb863c724974f94f16f4ea09b1b11b568f7803 100644 (file)
@@ -150,6 +150,7 @@ int mwifiex_send_null_packet(struct mwifiex_private *priv, u8 flags)
                return -1;
 
        tx_info = MWIFIEX_SKB_TXCB(skb);
+       memset(tx_info, 0, sizeof(*tx_info));
        tx_info->bss_num = priv->bss_num;
        tx_info->bss_type = priv->bss_type;
        tx_info->pkt_len = data_len - (sizeof(struct txpd) + INTF_HEADER_LEN);
index e73034fbbde9263b8e234ee7cd7747a1404c8a40..0e88364e0c670a5fe59fccdec3d711d679bf7be3 100644 (file)
@@ -605,6 +605,7 @@ int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv, const u8 *peer,
        }
 
        tx_info = MWIFIEX_SKB_TXCB(skb);
+       memset(tx_info, 0, sizeof(*tx_info));
        tx_info->bss_num = priv->bss_num;
        tx_info->bss_type = priv->bss_type;
 
@@ -760,6 +761,7 @@ int mwifiex_send_tdls_action_frame(struct mwifiex_private *priv, const u8 *peer,
        skb->priority = MWIFIEX_PRIO_VI;
 
        tx_info = MWIFIEX_SKB_TXCB(skb);
+       memset(tx_info, 0, sizeof(*tx_info));
        tx_info->bss_num = priv->bss_num;
        tx_info->bss_type = priv->bss_type;
        tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
index 37f26afd4314326a984213924128d44d40960285..fd7e5b9b4581fa5d44ea60a3476e45aa3054e045 100644 (file)
@@ -55,6 +55,7 @@ int mwifiex_handle_rx_packet(struct mwifiex_adapter *adapter,
                return -1;
        }
 
+       memset(rx_info, 0, sizeof(*rx_info));
        rx_info->bss_num = priv->bss_num;
        rx_info->bss_type = priv->bss_type;
 
index 9a56bc61cb1d29993ebcc057fb4cf058bd1100f2..b0601b91cc4f1310b76f519e5ae3651ebb8fe1c5 100644 (file)
@@ -175,6 +175,7 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
        }
 
        tx_info = MWIFIEX_SKB_TXCB(skb);
+       memset(tx_info, 0, sizeof(*tx_info));
        tx_info->bss_num = priv->bss_num;
        tx_info->bss_type = priv->bss_type;
        tx_info->flags |= MWIFIEX_BUF_FLAG_BRIDGED_PKT;
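
The mwifiex hunks above all add a memset() of the driver's tx_info/rx_info view of skb->cb before filling it, because the control block can still hold data written by an earlier layer and stale bytes would leak into unrelated fields such as flags. A small sketch of the pattern with a made-up control-block layout:

#include <linux/skbuff.h>
#include <linux/string.h>

/* Hypothetical per-driver view of skb->cb. */
struct example_tx_cb {
	u8 bss_num;
	u8 bss_type;
	u32 flags;
};

static void example_fill_cb(struct sk_buff *skb, u8 bss_num, u8 bss_type)
{
	struct example_tx_cb *cb = (struct example_tx_cb *)skb->cb;

	BUILD_BUG_ON(sizeof(*cb) > sizeof(skb->cb));

	memset(cb, 0, sizeof(*cb));	/* wipe stale data from earlier layers */
	cb->bss_num = bss_num;
	cb->bss_type = bss_type;
}
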
index e11dab2216c6f2c0ed388bae1d41c2b3e50a57a0..832006b5aab158e4e14356001ef8f40a952753a2 100644 (file)
@@ -231,9 +231,12 @@ static enum hrtimer_restart rt2800usb_tx_sta_fifo_timeout(struct hrtimer *timer)
  */
 static int rt2800usb_autorun_detect(struct rt2x00_dev *rt2x00dev)
 {
-       __le32 reg;
+       __le32 *reg;
        u32 fw_mode;
 
+       reg = kmalloc(sizeof(*reg), GFP_KERNEL);
+       if (reg == NULL)
+               return -ENOMEM;
        /* cannot use rt2x00usb_register_read here as it uses different
         * mode (MULTI_READ vs. DEVICE_MODE) and does not pass the
         * magic value USB_MODE_AUTORUN (0x11) to the device, thus the
@@ -241,8 +244,9 @@ static int rt2800usb_autorun_detect(struct rt2x00_dev *rt2x00dev)
         */
        rt2x00usb_vendor_request(rt2x00dev, USB_DEVICE_MODE,
                                 USB_VENDOR_REQUEST_IN, 0, USB_MODE_AUTORUN,
-                                &reg, sizeof(reg), REGISTER_TIMEOUT_FIRMWARE);
-       fw_mode = le32_to_cpu(reg);
+                                reg, sizeof(*reg), REGISTER_TIMEOUT_FIRMWARE);
+       fw_mode = le32_to_cpu(*reg);
+       kfree(reg);
 
        if ((fw_mode & 0x00000003) == 2)
                return 1;
@@ -261,6 +265,7 @@ static int rt2800usb_write_firmware(struct rt2x00_dev *rt2x00dev,
        int status;
        u32 offset;
        u32 length;
+       int retval;
 
        /*
         * Check which section of the firmware we need.
@@ -278,7 +283,10 @@ static int rt2800usb_write_firmware(struct rt2x00_dev *rt2x00dev,
        /*
         * Write firmware to device.
         */
-       if (rt2800usb_autorun_detect(rt2x00dev)) {
+       retval = rt2800usb_autorun_detect(rt2x00dev);
+       if (retval < 0)
+               return retval;
+       if (retval) {
                rt2x00_info(rt2x00dev,
                            "Firmware loading not required - NIC in AutoRun mode\n");
        } else {
@@ -763,7 +771,12 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
  */
 static int rt2800usb_efuse_detect(struct rt2x00_dev *rt2x00dev)
 {
-       if (rt2800usb_autorun_detect(rt2x00dev))
+       int retval;
+
+       retval = rt2800usb_autorun_detect(rt2x00dev);
+       if (retval < 0)
+               return retval;
+       if (retval)
                return 1;
        return rt2800_efuse_detect(rt2x00dev);
 }
@@ -772,7 +785,10 @@ static int rt2800usb_read_eeprom(struct rt2x00_dev *rt2x00dev)
 {
        int retval;
 
-       if (rt2800usb_efuse_detect(rt2x00dev))
+       retval = rt2800usb_efuse_detect(rt2x00dev);
+       if (retval < 0)
+               return retval;
+       if (retval)
                retval = rt2800_read_eeprom_efuse(rt2x00dev);
        else
                retval = rt2x00usb_eeprom_read(rt2x00dev, rt2x00dev->eeprom,
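
rt2800usb above moves the autorun-detect register from a stack variable to a kmalloc'd buffer, because buffers handed to the USB core must be DMA-capable heap memory, and it now propagates the allocation failure through the callers. A hedged sketch of the underlying pattern using a plain usb_control_msg() call (the request number is invented; the real driver goes through its own register-read helpers):

#include <linux/slab.h>
#include <linux/usb.h>

/* Sketch: allocate the transfer buffer from the heap, do the control
 * transfer, convert the little-endian result, and free the buffer.
 */
static int example_read_reg(struct usb_device *udev, u32 *value)
{
	__le32 *buf = kmalloc(sizeof(*buf), GFP_KERNEL);
	int ret;

	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
			      0x07 /* hypothetical bRequest */,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, 0, buf, sizeof(*buf), 1000);
	if (ret >= 0)
		*value = le32_to_cpu(*buf);

	kfree(buf);
	return ret < 0 ? ret : 0;
}
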
index 1844a47636b67c821f03fc8a565092ab59749426..c65b636bcab9dfb3bdf3528bd3d0fcd30f7a5812 100644 (file)
@@ -1030,14 +1030,21 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
 {
        struct gnttab_map_grant_ref *gop_map = *gopp_map;
        u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
+       /* This always points to the shinfo of the skb being checked, which
+        * could be either the first or the one on the frag_list
+        */
        struct skb_shared_info *shinfo = skb_shinfo(skb);
+       /* If this is non-NULL, we are currently checking the frag_list skb, and
+        * this points to the shinfo of the first one
+        */
+       struct skb_shared_info *first_shinfo = NULL;
        int nr_frags = shinfo->nr_frags;
+       const bool sharedslot = nr_frags &&
+                               frag_get_pending_idx(&shinfo->frags[0]) == pending_idx;
        int i, err;
-       struct sk_buff *first_skb = NULL;
 
        /* Check status of header. */
        err = (*gopp_copy)->status;
-       (*gopp_copy)++;
        if (unlikely(err)) {
                if (net_ratelimit())
                        netdev_dbg(queue->vif->dev,
@@ -1045,8 +1052,12 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
                                   (*gopp_copy)->status,
                                   pending_idx,
                                   (*gopp_copy)->source.u.ref);
-               xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
+               /* The first frag might still have this slot mapped */
+               if (!sharedslot)
+                       xenvif_idx_release(queue, pending_idx,
+                                          XEN_NETIF_RSP_ERROR);
        }
+       (*gopp_copy)++;
 
 check_frags:
        for (i = 0; i < nr_frags; i++, gop_map++) {
@@ -1062,8 +1073,19 @@ check_frags:
                                                pending_idx,
                                                gop_map->handle);
                        /* Had a previous error? Invalidate this fragment. */
-                       if (unlikely(err))
+                       if (unlikely(err)) {
                                xenvif_idx_unmap(queue, pending_idx);
+                               /* If the mapping of the first frag was OK, but
+                                * the header's copy failed, and they are
+                                * sharing a slot, send an error
+                                */
+                               if (i == 0 && sharedslot)
+                                       xenvif_idx_release(queue, pending_idx,
+                                                          XEN_NETIF_RSP_ERROR);
+                               else
+                                       xenvif_idx_release(queue, pending_idx,
+                                                          XEN_NETIF_RSP_OKAY);
+                       }
                        continue;
                }
 
@@ -1075,42 +1097,53 @@ check_frags:
                                   gop_map->status,
                                   pending_idx,
                                   gop_map->ref);
+
                xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
 
                /* Not the first error? Preceding frags already invalidated. */
                if (err)
                        continue;
-               /* First error: invalidate preceding fragments. */
+
+               /* First error: if the header hasn't shared a slot with the
+                * first frag, release it as well.
+                */
+               if (!sharedslot)
+                       xenvif_idx_release(queue,
+                                          XENVIF_TX_CB(skb)->pending_idx,
+                                          XEN_NETIF_RSP_OKAY);
+
+               /* Invalidate preceding fragments of this skb. */
                for (j = 0; j < i; j++) {
                        pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
                        xenvif_idx_unmap(queue, pending_idx);
+                       xenvif_idx_release(queue, pending_idx,
+                                          XEN_NETIF_RSP_OKAY);
+               }
+
+               /* And if we found the error while checking the frag_list, unmap
+                * the first skb's frags
+                */
+               if (first_shinfo) {
+                       for (j = 0; j < first_shinfo->nr_frags; j++) {
+                               pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]);
+                               xenvif_idx_unmap(queue, pending_idx);
+                               xenvif_idx_release(queue, pending_idx,
+                                                  XEN_NETIF_RSP_OKAY);
+                       }
                }
 
                /* Remember the error: invalidate all subsequent fragments. */
                err = newerr;
        }
 
-       if (skb_has_frag_list(skb)) {
-               first_skb = skb;
-               skb = shinfo->frag_list;
-               shinfo = skb_shinfo(skb);
+       if (skb_has_frag_list(skb) && !first_shinfo) {
+               first_shinfo = skb_shinfo(skb);
+               shinfo = skb_shinfo(skb_shinfo(skb)->frag_list);
                nr_frags = shinfo->nr_frags;
 
                goto check_frags;
        }
 
-       /* There was a mapping error in the frag_list skb. We have to unmap
-        * the first skb's frags
-        */
-       if (first_skb && err) {
-               int j;
-               shinfo = skb_shinfo(first_skb);
-               for (j = 0; j < shinfo->nr_frags; j++) {
-                       pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
-                       xenvif_idx_unmap(queue, pending_idx);
-               }
-       }
-
        *gopp_map = gop_map;
        return err;
 }
@@ -1518,7 +1551,16 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
 
                /* Check the remap error code. */
                if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) {
+                       /* If there was an error, xenvif_tx_check_gop is
+                        * expected to release all the frags which were mapped,
+                        * so kfree_skb shouldn't do it again
+                        */
                        skb_shinfo(skb)->nr_frags = 0;
+                       if (skb_has_frag_list(skb)) {
+                               struct sk_buff *nskb =
+                                               skb_shinfo(skb)->frag_list;
+                               skb_shinfo(nskb)->nr_frags = 0;
+                       }
                        kfree_skb(skb);
                        continue;
                }
@@ -1822,8 +1864,6 @@ void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
                           tx_unmap_op.status);
                BUG();
        }
-
-       xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_OKAY);
 }
 
 static inline int rx_work_todo(struct xenvif_queue *queue)
index 2ccb4a02368b9fab04b799f3ea1f8de72d17cbe6..055222bae6e4463d00835c894e0d606ea91ddc6f 100644 (file)
@@ -1439,16 +1439,11 @@ static void xennet_disconnect_backend(struct netfront_info *info)
        unsigned int i = 0;
        unsigned int num_queues = info->netdev->real_num_tx_queues;
 
+       netif_carrier_off(info->netdev);
+
        for (i = 0; i < num_queues; ++i) {
                struct netfront_queue *queue = &info->queues[i];
 
-               /* Stop old i/f to prevent errors whilst we rebuild the state. */
-               spin_lock_bh(&queue->rx_lock);
-               spin_lock_irq(&queue->tx_lock);
-               netif_carrier_off(queue->info->netdev);
-               spin_unlock_irq(&queue->tx_lock);
-               spin_unlock_bh(&queue->rx_lock);
-
                if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
                        unbind_from_irqhandler(queue->tx_irq, queue);
                if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
@@ -1458,6 +1453,8 @@ static void xennet_disconnect_backend(struct netfront_info *info)
                queue->tx_evtchn = queue->rx_evtchn = 0;
                queue->tx_irq = queue->rx_irq = 0;
 
+               napi_synchronize(&queue->napi);
+
                /* End access and free the pages */
                xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
                xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
@@ -2046,13 +2043,15 @@ static int xennet_connect(struct net_device *dev)
        /* By now, the queue structures have been set up */
        for (j = 0; j < num_queues; ++j) {
                queue = &np->queues[j];
-               spin_lock_bh(&queue->rx_lock);
-               spin_lock_irq(&queue->tx_lock);
 
                /* Step 1: Discard all pending TX packet fragments. */
+               spin_lock_irq(&queue->tx_lock);
                xennet_release_tx_bufs(queue);
+               spin_unlock_irq(&queue->tx_lock);
 
                /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
+               spin_lock_bh(&queue->rx_lock);
+
                for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
                        skb_frag_t *frag;
                        const struct page *page;
@@ -2076,6 +2075,8 @@ static int xennet_connect(struct net_device *dev)
                }
 
                queue->rx.req_prod_pvt = requeue_idx;
+
+               spin_unlock_bh(&queue->rx_lock);
        }
 
        /*
@@ -2087,13 +2088,17 @@ static int xennet_connect(struct net_device *dev)
        netif_carrier_on(np->netdev);
        for (j = 0; j < num_queues; ++j) {
                queue = &np->queues[j];
+
                notify_remote_via_irq(queue->tx_irq);
                if (queue->tx_irq != queue->rx_irq)
                        notify_remote_via_irq(queue->rx_irq);
-               xennet_tx_buf_gc(queue);
-               xennet_alloc_rx_buffers(queue);
 
+               spin_lock_irq(&queue->tx_lock);
+               xennet_tx_buf_gc(queue);
                spin_unlock_irq(&queue->tx_lock);
+
+               spin_lock_bh(&queue->rx_lock);
+               xennet_alloc_rx_buffers(queue);
                spin_unlock_bh(&queue->rx_lock);
        }
 
index a3bf2122a8d5b69d0de7db8c50d302b99021fbfc..401b2453da45ae033c7d334dff28679284202765 100644 (file)
@@ -182,40 +182,6 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
 }
 EXPORT_SYMBOL(of_mdiobus_register);
 
-/**
- * of_mdiobus_link_phydev - Find a device node for a phy
- * @mdio: pointer to mii_bus structure
- * @phydev: phydev for which the of_node pointer should be set
- *
- * Walk the list of subnodes of a mdio bus and look for a node that matches the
- * phy's address with its 'reg' property. If found, set the of_node pointer for
- * the phy. This allows auto-probed pyh devices to be supplied with information
- * passed in via DT.
- */
-void of_mdiobus_link_phydev(struct mii_bus *mdio,
-                           struct phy_device *phydev)
-{
-       struct device *dev = &phydev->dev;
-       struct device_node *child;
-
-       if (dev->of_node || !mdio->dev.of_node)
-               return;
-
-       for_each_available_child_of_node(mdio->dev.of_node, child) {
-               int addr;
-
-               addr = of_mdio_parse_addr(&mdio->dev, child);
-               if (addr < 0)
-                       continue;
-
-               if (addr == phydev->addr) {
-                       dev->of_node = child;
-                       return;
-               }
-       }
-}
-EXPORT_SYMBOL(of_mdiobus_link_phydev);
-
 /* Helper function for of_phy_find_device */
 static int of_phy_match(struct device *dev, void *phy_np)
 {
index 2872ece81f358df7ff139c143e86d58031485b14..44333bd8f90886260e2219acad391dadaa9fd095 100644 (file)
@@ -5,6 +5,12 @@
 # Parport configuration.
 #
 
+config ARCH_MIGHT_HAVE_PC_PARPORT
+       bool
+       help
+         Select this config option from the architecture Kconfig if
+         the architecture might have PC parallel port hardware.
+
 menuconfig PARPORT
        tristate "Parallel port support"
        depends on HAS_IOMEM
@@ -31,12 +37,6 @@ menuconfig PARPORT
 
          If unsure, say Y.
 
-config ARCH_MIGHT_HAVE_PC_PARPORT
-       bool
-       help
-         Select this config option from the architecture Kconfig if
-         the architecture might have PC parallel port hardware.
-
 if PARPORT
 
 config PARPORT_PC
index ca4927ba84334a2b64b09b8b2e9eb60564024f65..37263b0ebfe32570f49a3df7bfc027ab6cd562e0 100644 (file)
 #include "pci.h"
 
 /**
- * pci_acpi_wake_bus - Wake-up notification handler for root buses.
- * @handle: ACPI handle of a device the notification is for.
- * @event: Type of the signaled event.
- * @context: PCI root bus to wake up devices on.
+ * pci_acpi_wake_bus - Root bus wakeup notification fork function.
+ * @work: Work item to handle.
  */
-static void pci_acpi_wake_bus(acpi_handle handle, u32 event, void *context)
+static void pci_acpi_wake_bus(struct work_struct *work)
 {
-       struct pci_bus *pci_bus = context;
+       struct acpi_device *adev;
+       struct acpi_pci_root *root;
 
-       if (event == ACPI_NOTIFY_DEVICE_WAKE && pci_bus)
-               pci_pme_wakeup_bus(pci_bus);
+       adev = container_of(work, struct acpi_device, wakeup.context.work);
+       root = acpi_driver_data(adev);
+       pci_pme_wakeup_bus(root->bus);
 }
 
 /**
- * pci_acpi_wake_dev - Wake-up notification handler for PCI devices.
+ * pci_acpi_wake_dev - PCI device wakeup notification work function.
  * @handle: ACPI handle of a device the notification is for.
- * @event: Type of the signaled event.
- * @context: PCI device object to wake up.
+ * @work: Work item to handle.
  */
-static void pci_acpi_wake_dev(acpi_handle handle, u32 event, void *context)
+static void pci_acpi_wake_dev(struct work_struct *work)
 {
-       struct pci_dev *pci_dev = context;
+       struct acpi_device_wakeup_context *context;
+       struct pci_dev *pci_dev;
 
-       if (event != ACPI_NOTIFY_DEVICE_WAKE || !pci_dev)
-               return;
+       context = container_of(work, struct acpi_device_wakeup_context, work);
+       pci_dev = to_pci_dev(context->dev);
 
        if (pci_dev->pme_poll)
                pci_dev->pme_poll = false;
@@ -65,23 +65,12 @@ static void pci_acpi_wake_dev(acpi_handle handle, u32 event, void *context)
 }
 
 /**
- * pci_acpi_add_bus_pm_notifier - Register PM notifier for given PCI bus.
- * @dev: ACPI device to add the notifier for.
- * @pci_bus: PCI bus to walk checking for PME status if an event is signaled.
- */
-acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev,
-                                        struct pci_bus *pci_bus)
-{
-       return acpi_add_pm_notifier(dev, pci_acpi_wake_bus, pci_bus);
-}
-
-/**
- * pci_acpi_remove_bus_pm_notifier - Unregister PCI bus PM notifier.
- * @dev: ACPI device to remove the notifier from.
+ * pci_acpi_add_bus_pm_notifier - Register PM notifier for root PCI bus.
+ * @dev: PCI root bridge ACPI device.
  */
-acpi_status pci_acpi_remove_bus_pm_notifier(struct acpi_device *dev)
+acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev)
 {
-       return acpi_remove_pm_notifier(dev, pci_acpi_wake_bus);
+       return acpi_add_pm_notifier(dev, NULL, pci_acpi_wake_bus);
 }
 
 /**
@@ -92,16 +81,7 @@ acpi_status pci_acpi_remove_bus_pm_notifier(struct acpi_device *dev)
 acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev,
                                     struct pci_dev *pci_dev)
 {
-       return acpi_add_pm_notifier(dev, pci_acpi_wake_dev, pci_dev);
-}
-
-/**
- * pci_acpi_remove_pm_notifier - Unregister PCI device PM notifier.
- * @dev: ACPI device to remove the notifier from.
- */
-acpi_status pci_acpi_remove_pm_notifier(struct acpi_device *dev)
-{
-       return acpi_remove_pm_notifier(dev, pci_acpi_wake_dev);
+       return acpi_add_pm_notifier(dev, &pci_dev->dev, pci_acpi_wake_dev);
 }
 
 phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle)
@@ -170,14 +150,13 @@ static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
 
 static bool acpi_pci_power_manageable(struct pci_dev *dev)
 {
-       acpi_handle handle = ACPI_HANDLE(&dev->dev);
-
-       return handle ? acpi_bus_power_manageable(handle) : false;
+       struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
+       return adev ? acpi_device_power_manageable(adev) : false;
 }
 
 static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
 {
-       acpi_handle handle = ACPI_HANDLE(&dev->dev);
+       struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
        static const u8 state_conv[] = {
                [PCI_D0] = ACPI_STATE_D0,
                [PCI_D1] = ACPI_STATE_D1,
@@ -188,7 +167,7 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
        int error = -EINVAL;
 
        /* If the ACPI device has _EJ0, ignore the device */
-       if (!handle || acpi_has_method(handle, "_EJ0"))
+       if (!adev || acpi_has_method(adev->handle, "_EJ0"))
                return -ENODEV;
 
        switch (state) {
@@ -202,7 +181,7 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
        case PCI_D1:
        case PCI_D2:
        case PCI_D3hot:
-               error = acpi_bus_set_power(handle, state_conv[state]);
+               error = acpi_device_set_power(adev, state_conv[state]);
        }
 
        if (!error)
@@ -214,9 +193,8 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
 
 static bool acpi_pci_can_wakeup(struct pci_dev *dev)
 {
-       acpi_handle handle = ACPI_HANDLE(&dev->dev);
-
-       return handle ? acpi_bus_can_wakeup(handle) : false;
+       struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
+       return adev ? acpi_device_can_wakeup(adev) : false;
 }
 
 static void acpi_pci_propagate_wakeup_enable(struct pci_bus *bus, bool enable)
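
The PCI/ACPI hunk above turns the wakeup notifiers into workqueue functions that recover their context via container_of() on the embedded work item. A minimal sketch of that idiom with a hypothetical context structure:

#include <linux/workqueue.h>

/* Sketch: embed the work item in the owning context and recover the
 * context with container_of() inside the handler.
 */
struct example_ctx {
	struct work_struct work;
	int wake_count;			/* hypothetical payload */
};

static void example_wake_fn(struct work_struct *work)
{
	struct example_ctx *ctx = container_of(work, struct example_ctx, work);

	ctx->wake_count++;		/* act on the owning object */
}

static void example_ctx_init(struct example_ctx *ctx)
{
	ctx->wake_count = 0;
	INIT_WORK(&ctx->work, example_wake_fn);
}
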
index 1bd6363bc95ef2b0d0bd2b58d733159941c78fa0..9f43916637ca251372fb57efc32bdd82dde4eef1 100644 (file)
@@ -1431,7 +1431,7 @@ static void st_gpio_irqmux_handler(unsigned irq, struct irq_desc *desc)
 
        status = readl(info->irqmux_base);
 
-       for_each_set_bit(n, &status, ST_GPIO_PINS_PER_BANK)
+       for_each_set_bit(n, &status, info->nbanks)
                __gpio_irq_handler(&info->banks[n]);
 
        chained_irq_exit(chip, desc);
index b81448b2c75da0c8c5d1803f3af64053f834e040..d2b780aade895e2baf44a7960f064025f7063dc6 100644 (file)
@@ -67,8 +67,8 @@ static int pnpacpi_set_resources(struct pnp_dev *dev)
 
        pnp_dbg(&dev->dev, "set resources\n");
 
-       handle = ACPI_HANDLE(&dev->dev);
-       if (!handle || acpi_bus_get_device(handle, &acpi_dev)) {
+       acpi_dev = ACPI_COMPANION(&dev->dev);
+       if (!acpi_dev) {
                dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
                return -ENODEV;
        }
@@ -76,6 +76,7 @@ static int pnpacpi_set_resources(struct pnp_dev *dev)
        if (WARN_ON_ONCE(acpi_dev != dev->data))
                dev->data = acpi_dev;
 
+       handle = acpi_dev->handle;
        if (acpi_has_method(handle, METHOD_NAME__SRS)) {
                struct acpi_buffer buffer;
 
@@ -93,8 +94,8 @@ static int pnpacpi_set_resources(struct pnp_dev *dev)
                }
                kfree(buffer.pointer);
        }
-       if (!ret && acpi_bus_power_manageable(handle))
-               ret = acpi_bus_set_power(handle, ACPI_STATE_D0);
+       if (!ret && acpi_device_power_manageable(acpi_dev))
+               ret = acpi_device_set_power(acpi_dev, ACPI_STATE_D0);
 
        return ret;
 }
@@ -102,23 +103,22 @@ static int pnpacpi_set_resources(struct pnp_dev *dev)
 static int pnpacpi_disable_resources(struct pnp_dev *dev)
 {
        struct acpi_device *acpi_dev;
-       acpi_handle handle;
        acpi_status status;
 
        dev_dbg(&dev->dev, "disable resources\n");
 
-       handle = ACPI_HANDLE(&dev->dev);
-       if (!handle || acpi_bus_get_device(handle, &acpi_dev)) {
+       acpi_dev = ACPI_COMPANION(&dev->dev);
+       if (!acpi_dev) {
                dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
                return 0;
        }
 
        /* acpi_unregister_gsi(pnp_irq(dev, 0)); */
-       if (acpi_bus_power_manageable(handle))
-               acpi_bus_set_power(handle, ACPI_STATE_D3_COLD);
+       if (acpi_device_power_manageable(acpi_dev))
+               acpi_device_set_power(acpi_dev, ACPI_STATE_D3_COLD);
 
-       /* continue even if acpi_bus_set_power() fails */
-       status = acpi_evaluate_object(handle, "_DIS", NULL, NULL);
+       /* continue even if acpi_device_set_power() fails */
+       status = acpi_evaluate_object(acpi_dev->handle, "_DIS", NULL, NULL);
        if (ACPI_FAILURE(status) && status != AE_NOT_FOUND)
                return -ENODEV;
 
@@ -128,26 +128,22 @@ static int pnpacpi_disable_resources(struct pnp_dev *dev)
 #ifdef CONFIG_ACPI_SLEEP
 static bool pnpacpi_can_wakeup(struct pnp_dev *dev)
 {
-       struct acpi_device *acpi_dev;
-       acpi_handle handle;
+       struct acpi_device *acpi_dev = ACPI_COMPANION(&dev->dev);
 
-       handle = ACPI_HANDLE(&dev->dev);
-       if (!handle || acpi_bus_get_device(handle, &acpi_dev)) {
+       if (!acpi_dev) {
                dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
                return false;
        }
 
-       return acpi_bus_can_wakeup(handle);
+       return acpi_bus_can_wakeup(acpi_dev->handle);
 }
 
 static int pnpacpi_suspend(struct pnp_dev *dev, pm_message_t state)
 {
-       struct acpi_device *acpi_dev;
-       acpi_handle handle;
+       struct acpi_device *acpi_dev = ACPI_COMPANION(&dev->dev);
        int error = 0;
 
-       handle = ACPI_HANDLE(&dev->dev);
-       if (!handle || acpi_bus_get_device(handle, &acpi_dev)) {
+       if (!acpi_dev) {
                dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
                return 0;
        }
@@ -159,7 +155,7 @@ static int pnpacpi_suspend(struct pnp_dev *dev, pm_message_t state)
                        return error;
        }
 
-       if (acpi_bus_power_manageable(handle)) {
+       if (acpi_device_power_manageable(acpi_dev)) {
                int power_state = acpi_pm_device_sleep_state(&dev->dev, NULL,
                                                        ACPI_STATE_D3_COLD);
                if (power_state < 0)
@@ -167,12 +163,12 @@ static int pnpacpi_suspend(struct pnp_dev *dev, pm_message_t state)
                                        ACPI_STATE_D0 : ACPI_STATE_D3_COLD;
 
                /*
-                * acpi_bus_set_power() often fails (keyboard port can't be
+                * acpi_device_set_power() can fail (keyboard port can't be
                 * powered-down?), and in any case, our return value is ignored
                 * by pnp_bus_suspend().  Hence we don't revert the wakeup
                 * setting if the set_power fails.
                 */
-               error = acpi_bus_set_power(handle, power_state);
+               error = acpi_device_set_power(acpi_dev, power_state);
        }
 
        return error;
@@ -180,11 +176,10 @@ static int pnpacpi_suspend(struct pnp_dev *dev, pm_message_t state)
 
 static int pnpacpi_resume(struct pnp_dev *dev)
 {
-       struct acpi_device *acpi_dev;
-       acpi_handle handle = ACPI_HANDLE(&dev->dev);
+       struct acpi_device *acpi_dev = ACPI_COMPANION(&dev->dev);
        int error = 0;
 
-       if (!handle || acpi_bus_get_device(handle, &acpi_dev)) {
+       if (!acpi_dev) {
                dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
                return -ENODEV;
        }
@@ -192,8 +187,8 @@ static int pnpacpi_resume(struct pnp_dev *dev)
        if (device_may_wakeup(&dev->dev))
                acpi_pm_device_sleep_wake(&dev->dev, false);
 
-       if (acpi_bus_power_manageable(handle))
-               error = acpi_bus_set_power(handle, ACPI_STATE_D0);
+       if (acpi_device_power_manageable(acpi_dev))
+               error = acpi_device_set_power(acpi_dev, ACPI_STATE_D0);
 
        return error;
 }
@@ -295,9 +290,11 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
                return error;
        }
 
+       error = acpi_bind_one(&dev->dev, device);
+
        num++;
 
-       return 0;
+       return error;
 }
 
 static acpi_status __init pnpacpi_add_device_handler(acpi_handle handle,
@@ -313,41 +310,6 @@ static acpi_status __init pnpacpi_add_device_handler(acpi_handle handle,
        return AE_OK;
 }
 
-static int __init acpi_pnp_match(struct device *dev, void *_pnp)
-{
-       struct acpi_device *acpi = to_acpi_device(dev);
-       struct pnp_dev *pnp = _pnp;
-
-       /* true means it matched */
-       return !acpi->physical_node_count
-           && compare_pnp_id(pnp->id, acpi_device_hid(acpi));
-}
-
-static struct acpi_device * __init acpi_pnp_find_companion(struct device *dev)
-{
-       dev = bus_find_device(&acpi_bus_type, NULL, to_pnp_dev(dev),
-                             acpi_pnp_match);
-       if (!dev)
-               return NULL;
-
-       put_device(dev);
-       return to_acpi_device(dev);
-}
-
-/* complete initialization of a PNPACPI device includes having
- * pnpdev->dev.archdata.acpi_handle point to its ACPI sibling.
- */
-static bool acpi_pnp_bus_match(struct device *dev)
-{
-       return dev->bus == &pnp_bus_type;
-}
-
-static struct acpi_bus_type __initdata acpi_pnp_bus = {
-       .name        = "PNP",
-       .match       = acpi_pnp_bus_match,
-       .find_companion = acpi_pnp_find_companion,
-};
-
 int pnpacpi_disabled __initdata;
 static int __init pnpacpi_init(void)
 {
@@ -357,10 +319,8 @@ static int __init pnpacpi_init(void)
        }
        printk(KERN_INFO "pnp: PnP ACPI init\n");
        pnp_register_protocol(&pnpacpi_protocol);
-       register_acpi_bus_type(&acpi_pnp_bus);
        acpi_get_devices(NULL, pnpacpi_add_device_handler, NULL, NULL);
        printk(KERN_INFO "pnp: PnP ACPI: found %d devices\n", num);
-       unregister_acpi_bus_type(&acpi_pnp_bus);
        pnp_platform_devices = 1;
        return 0;
 }
index 15b3459f86562d2211dde73e507197bbea04b50e..220acb4cbee520c6cfed5c78fdf49fb776f6bf99 100644 (file)
@@ -633,7 +633,6 @@ raw3270_reset_device_cb(struct raw3270_request *rq, void *data)
        } else
                raw3270_writesf_readpart(rp);
        memset(&rp->init_reset, 0, sizeof(rp->init_reset));
-       memset(&rp->init_data, 0, sizeof(rp->init_data));
 }
 
 static int
index 69ef4f8cfac8c14366c9803eec4ed6877accf786..4038437ff033f892d30c746056bbb16d12e40c5a 100644 (file)
@@ -901,10 +901,15 @@ static int ap_device_probe(struct device *dev)
        int rc;
 
        ap_dev->drv = ap_drv;
+
+       spin_lock_bh(&ap_device_list_lock);
+       list_add(&ap_dev->list, &ap_device_list);
+       spin_unlock_bh(&ap_device_list_lock);
+
        rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;
-       if (!rc) {
+       if (rc) {
                spin_lock_bh(&ap_device_list_lock);
-               list_add(&ap_dev->list, &ap_device_list);
+               list_del_init(&ap_dev->list);
                spin_unlock_bh(&ap_device_list_lock);
        }
        return rc;
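
The ap_bus change above puts the device on the bus list before calling the driver's probe routine and removes it only if probe fails, so code that runs during probe can already find the device. A generic sketch of that "publish first, roll back on error" ordering, with hypothetical structure and lock names:

/* Illustrative sketch: publish the device before probe, unpublish on failure. */
static int example_probe(struct example_dev *dev, struct example_drv *drv)
{
        int rc;

        spin_lock_bh(&example_list_lock);
        list_add(&dev->list, &example_device_list);
        spin_unlock_bh(&example_list_lock);

        rc = drv->probe ? drv->probe(dev) : -ENODEV;
        if (rc) {
                spin_lock_bh(&example_list_lock);
                list_del_init(&dev->list);
                spin_unlock_bh(&example_list_lock);
        }
        return rc;
}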
index 78b0fba7047e98d37ef4025690346bd93936d3fa..8afc6fee40c547ccca5f5c106e651c3fbfce9942 100644 (file)
@@ -1,6 +1,6 @@
 config VIDEO_OMAP4
        bool "OMAP 4 Camera support"
-       depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && I2C && ARCH_OMAP4
+       depends on VIDEO_V4L2=y && VIDEO_V4L2_SUBDEV_API && I2C=y && ARCH_OMAP4
        select VIDEOBUF2_DMA_CONTIG
        ---help---
          Driver for an OMAP 4 ISS controller.
index 9d2b673f90e30e3f676930a5f08dea35c058bfb6..b8125aa64ad8c74f2f3d12fb264eeacb80517603 100644 (file)
@@ -1169,8 +1169,8 @@ static int ep_enable(struct usb_ep *ep,
 
        if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
                cap |= QH_IOS;
-       if (hwep->num)
-               cap |= QH_ZLT;
+
+       cap |= QH_ZLT;
        cap |= (hwep->ep.maxpacket << __ffs(QH_MAX_PKT)) & QH_MAX_PKT;
        /*
         * For ISO-TX, we set mult at QH as the largest value, and use
index 21b99b4b4082e5563430f464a9f8999bff03b0f4..0e950ad8cb2525fd0b335a20b46d2fc28f814f9f 100644 (file)
@@ -889,6 +889,25 @@ static int hub_usb3_port_disable(struct usb_hub *hub, int port1)
        if (!hub_is_superspeed(hub->hdev))
                return -EINVAL;
 
+       ret = hub_port_status(hub, port1, &portstatus, &portchange);
+       if (ret < 0)
+               return ret;
+
+       /*
+        * The AMD FCH USB XHCI controller [1022:7814] reports a spurious
+        * result when a USB 3.0 port's link state is set to Disabled: a
+        * USB 3.0 device hotplugged afterwards is routed to the 2.0 root
+        * hub and enumerated as a high-speed device. The port is already
+        * in the USB_SS_PORT_LS_RX_DETECT state, so check for that state
+        * here and skip the disable to avoid the bug.
+        */
+       if ((portstatus & USB_PORT_STAT_LINK_STATE) ==
+                               USB_SS_PORT_LS_RX_DETECT) {
+               dev_dbg(&hub->ports[port1 - 1]->dev,
+                        "Not disabling port; link state is RxDetect\n");
+               return ret;
+       }
+
        ret = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_SS_DISABLED);
        if (ret)
                return ret;
index b7a506f2bb144e1c2e59b0f84c4c736d90dd830b..5c660c77f03b58a32c24749b7053df6608a23230 100644 (file)
@@ -426,20 +426,18 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
                 * p2m are consistent.
                 */
                if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-                       unsigned long p;
-                       struct page   *scratch_page = get_balloon_scratch_page();
-
                        if (!PageHighMem(page)) {
+                               struct page *scratch_page = get_balloon_scratch_page();
+
                                ret = HYPERVISOR_update_va_mapping(
                                                (unsigned long)__va(pfn << PAGE_SHIFT),
                                                pfn_pte(page_to_pfn(scratch_page),
                                                        PAGE_KERNEL_RO), 0);
                                BUG_ON(ret);
-                       }
-                       p = page_to_pfn(scratch_page);
-                       __set_phys_to_machine(pfn, pfn_to_mfn(p));
 
-                       put_balloon_scratch_page();
+                               put_balloon_scratch_page();
+                       }
+                       __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
                }
 #endif
 
index c3667b202f2f50618d85d147d86ac746535e3464..5f1e1f3cd18619ed2899dcc5e967ca274f690189 100644 (file)
@@ -88,7 +88,6 @@ static int xen_suspend(void *data)
 
        if (!si->cancelled) {
                xen_irq_resume();
-               xen_console_resume();
                xen_timer_resume();
        }
 
@@ -135,6 +134,10 @@ static void do_suspend(void)
 
        err = stop_machine(xen_suspend, &si, cpumask_of(0));
 
+       /* Resume console as early as possible. */
+       if (!si.cancelled)
+               xen_console_resume();
+
        raw_notifier_call_chain(&xen_resume_notifier, 0, NULL);
 
        dpm_resume_start(si.cancelled ? PMSG_THAW : PMSG_RESTORE);
index 955947ef3e0263590b64162f5888b81822196415..1c9c5f0a9e2be991b0bcba9ab71bcdcc1ae8ddc4 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -830,16 +830,20 @@ void exit_aio(struct mm_struct *mm)
 static void put_reqs_available(struct kioctx *ctx, unsigned nr)
 {
        struct kioctx_cpu *kcpu;
+       unsigned long flags;
 
        preempt_disable();
        kcpu = this_cpu_ptr(ctx->cpu);
 
+       local_irq_save(flags);
        kcpu->reqs_available += nr;
+
        while (kcpu->reqs_available >= ctx->req_batch * 2) {
                kcpu->reqs_available -= ctx->req_batch;
                atomic_add(ctx->req_batch, &ctx->reqs_available);
        }
 
+       local_irq_restore(flags);
        preempt_enable();
 }
 
@@ -847,10 +851,12 @@ static bool get_reqs_available(struct kioctx *ctx)
 {
        struct kioctx_cpu *kcpu;
        bool ret = false;
+       unsigned long flags;
 
        preempt_disable();
        kcpu = this_cpu_ptr(ctx->cpu);
 
+       local_irq_save(flags);
        if (!kcpu->reqs_available) {
                int old, avail = atomic_read(&ctx->reqs_available);
 
@@ -869,6 +875,7 @@ static bool get_reqs_available(struct kioctx *ctx)
        ret = true;
        kcpu->reqs_available--;
 out:
+       local_irq_restore(flags);
        preempt_enable();
        return ret;
 }
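
The aio hunks above extend the preempt-disabled sections in put_reqs_available() and get_reqs_available() with local_irq_save()/local_irq_restore(), because the per-cpu reqs_available counter can also be touched from interrupt context. A stripped-down sketch of that pattern, with hypothetical context and counter names:

/*
 * Illustrative sketch, not the aio code: a per-cpu counter that is also
 * updated from interrupt context must be protected by disabling local
 * interrupts, not just preemption.
 */
static void example_put(struct example_ctx *ctx, unsigned nr)
{
        struct example_pcpu *pc;
        unsigned long flags;

        preempt_disable();
        pc = this_cpu_ptr(ctx->cpu);

        local_irq_save(flags);          /* keep irq handlers off the counter */
        pc->available += nr;
        local_irq_restore(flags);

        preempt_enable();
}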
index e12441c7cf1d63ed8e7c22039187cf7d4a64e148..7187b14faa6cd0c1c846fcfb155f431102ce8bad 100644 (file)
@@ -484,8 +484,19 @@ void btrfs_wait_logged_extents(struct btrfs_root *log, u64 transid)
                                           log_list);
                list_del_init(&ordered->log_list);
                spin_unlock_irq(&log->log_extents_lock[index]);
+
+               if (!test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) &&
+                   !test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) {
+                       struct inode *inode = ordered->inode;
+                       u64 start = ordered->file_offset;
+                       u64 end = ordered->file_offset + ordered->len - 1;
+
+                       WARN_ON(!inode);
+                       filemap_fdatawrite_range(inode->i_mapping, start, end);
+               }
                wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE,
                                                   &ordered->flags));
+
                btrfs_put_ordered_extent(ordered);
                spin_lock_irq(&log->log_extents_lock[index]);
        }
index 6104676857f5c6567bcbb5a375cb91e90a78956f..6cb82f62cb7c22c4b3038e248e52b9694171a5bd 100644 (file)
@@ -1680,11 +1680,11 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
        if (device->bdev == root->fs_info->fs_devices->latest_bdev)
                root->fs_info->fs_devices->latest_bdev = next_device->bdev;
 
-       if (device->bdev)
+       if (device->bdev) {
                device->fs_devices->open_devices--;
-
-       /* remove sysfs entry */
-       btrfs_kobj_rm_device(root->fs_info, device);
+               /* remove sysfs entry */
+               btrfs_kobj_rm_device(root->fs_info, device);
+       }
 
        call_rcu(&device->rcu, free_device);
 
index 0b2528fb640e77e4a38a351c51f8a01f12102c14..a93f7e6ea4cf935aea64e8266b221a2c53477628 100644 (file)
@@ -306,7 +306,7 @@ static int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
        if (unlikely(nr < 0))
                return nr;
 
-       tsk->flags = PF_DUMPCORE;
+       tsk->flags |= PF_DUMPCORE;
        if (atomic_read(&mm->mm_users) == nr + 1)
                goto done;
        /*
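
The one-line coredump fix above replaces a plain assignment of tsk->flags with an OR, so PF_DUMPCORE is added without wiping every other flag bit already set on the task. The difference, shown with a self-contained snippet using made-up flag names:

#include <assert.h>

#define EX_FLAG_A 0x1
#define EX_FLAG_B 0x2

int main(void)
{
        unsigned int flags = EX_FLAG_A;

        flags |= EX_FLAG_B;             /* sets B, keeps A */
        assert(flags == (EX_FLAG_A | EX_FLAG_B));

        flags = EX_FLAG_B;              /* the buggy form: clobbers A */
        assert(flags == EX_FLAG_B);
        return 0;
}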
index 98040ba388ac1e2db62f96f253bc141758013b10..194d0d122cae566b8e82c1a1bd3188499a467796 100644 (file)
@@ -198,9 +198,8 @@ static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio)
  * L1 cache.
  */
 static inline struct page *dio_get_page(struct dio *dio,
-               struct dio_submit *sdio, size_t *from, size_t *to)
+                                       struct dio_submit *sdio)
 {
-       int n;
        if (dio_pages_present(sdio) == 0) {
                int ret;
 
@@ -209,10 +208,7 @@ static inline struct page *dio_get_page(struct dio *dio,
                        return ERR_PTR(ret);
                BUG_ON(dio_pages_present(sdio) == 0);
        }
-       n = sdio->head++;
-       *from = n ? 0 : sdio->from;
-       *to = (n == sdio->tail - 1) ? sdio->to : PAGE_SIZE;
-       return dio->pages[n];
+       return dio->pages[sdio->head];
 }
 
 /**
@@ -911,11 +907,15 @@ static int do_direct_IO(struct dio *dio, struct dio_submit *sdio,
        while (sdio->block_in_file < sdio->final_block_in_request) {
                struct page *page;
                size_t from, to;
-               page = dio_get_page(dio, sdio, &from, &to);
+
+               page = dio_get_page(dio, sdio);
                if (IS_ERR(page)) {
                        ret = PTR_ERR(page);
                        goto out;
                }
+               from = sdio->head ? 0 : sdio->from;
+               to = (sdio->head == sdio->tail - 1) ? sdio->to : PAGE_SIZE;
+               sdio->head++;
 
                while (from < to) {
                        unsigned this_chunk_bytes;      /* # of bytes mapped */
index 098f97bdcf1b165282eb8b3dacf0d79380694cee..ca887314aba9deb6d59811f0d19b8c9b9a0381c3 100644 (file)
@@ -643,9 +643,8 @@ struct fuse_copy_state {
        unsigned long seglen;
        unsigned long addr;
        struct page *pg;
-       void *mapaddr;
-       void *buf;
        unsigned len;
+       unsigned offset;
        unsigned move_pages:1;
 };
 
@@ -666,23 +665,17 @@ static void fuse_copy_finish(struct fuse_copy_state *cs)
        if (cs->currbuf) {
                struct pipe_buffer *buf = cs->currbuf;
 
-               if (!cs->write) {
-                       kunmap_atomic(cs->mapaddr);
-               } else {
-                       kunmap_atomic(cs->mapaddr);
+               if (cs->write)
                        buf->len = PAGE_SIZE - cs->len;
-               }
                cs->currbuf = NULL;
-               cs->mapaddr = NULL;
-       } else if (cs->mapaddr) {
-               kunmap_atomic(cs->mapaddr);
+       } else if (cs->pg) {
                if (cs->write) {
                        flush_dcache_page(cs->pg);
                        set_page_dirty_lock(cs->pg);
                }
                put_page(cs->pg);
-               cs->mapaddr = NULL;
        }
+       cs->pg = NULL;
 }
 
 /*
@@ -691,7 +684,7 @@ static void fuse_copy_finish(struct fuse_copy_state *cs)
  */
 static int fuse_copy_fill(struct fuse_copy_state *cs)
 {
-       unsigned long offset;
+       struct page *page;
        int err;
 
        unlock_request(cs->fc, cs->req);
@@ -706,14 +699,12 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
 
                        BUG_ON(!cs->nr_segs);
                        cs->currbuf = buf;
-                       cs->mapaddr = kmap_atomic(buf->page);
+                       cs->pg = buf->page;
+                       cs->offset = buf->offset;
                        cs->len = buf->len;
-                       cs->buf = cs->mapaddr + buf->offset;
                        cs->pipebufs++;
                        cs->nr_segs--;
                } else {
-                       struct page *page;
-
                        if (cs->nr_segs == cs->pipe->buffers)
                                return -EIO;
 
@@ -726,8 +717,8 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
                        buf->len = 0;
 
                        cs->currbuf = buf;
-                       cs->mapaddr = kmap_atomic(page);
-                       cs->buf = cs->mapaddr;
+                       cs->pg = page;
+                       cs->offset = 0;
                        cs->len = PAGE_SIZE;
                        cs->pipebufs++;
                        cs->nr_segs++;
@@ -740,14 +731,13 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
                        cs->iov++;
                        cs->nr_segs--;
                }
-               err = get_user_pages_fast(cs->addr, 1, cs->write, &cs->pg);
+               err = get_user_pages_fast(cs->addr, 1, cs->write, &page);
                if (err < 0)
                        return err;
                BUG_ON(err != 1);
-               offset = cs->addr % PAGE_SIZE;
-               cs->mapaddr = kmap_atomic(cs->pg);
-               cs->buf = cs->mapaddr + offset;
-               cs->len = min(PAGE_SIZE - offset, cs->seglen);
+               cs->pg = page;
+               cs->offset = cs->addr % PAGE_SIZE;
+               cs->len = min(PAGE_SIZE - cs->offset, cs->seglen);
                cs->seglen -= cs->len;
                cs->addr += cs->len;
        }
@@ -760,15 +750,20 @@ static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
 {
        unsigned ncpy = min(*size, cs->len);
        if (val) {
+               void *pgaddr = kmap_atomic(cs->pg);
+               void *buf = pgaddr + cs->offset;
+
                if (cs->write)
-                       memcpy(cs->buf, *val, ncpy);
+                       memcpy(buf, *val, ncpy);
                else
-                       memcpy(*val, cs->buf, ncpy);
+                       memcpy(*val, buf, ncpy);
+
+               kunmap_atomic(pgaddr);
                *val += ncpy;
        }
        *size -= ncpy;
        cs->len -= ncpy;
-       cs->buf += ncpy;
+       cs->offset += ncpy;
        return ncpy;
 }
 
@@ -874,8 +869,8 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
 out_fallback_unlock:
        unlock_page(newpage);
 out_fallback:
-       cs->mapaddr = kmap_atomic(buf->page);
-       cs->buf = cs->mapaddr + buf->offset;
+       cs->pg = buf->page;
+       cs->offset = buf->offset;
 
        err = lock_request(cs->fc, cs->req);
        if (err)
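
The fuse_copy_state rework above stops keeping a long-lived kmap_atomic() mapping in the state structure; it records only the page and offset, and fuse_copy_do() maps the page just for the duration of the memcpy(). A sketch of that narrow mapping scope (the helper name is illustrative, the kmap_atomic API is the one used above):

/* Illustrative sketch: map the page only around the actual copy. */
static unsigned example_copy_to_page(struct page *pg, unsigned offset,
                                     const void *src, unsigned len)
{
        void *pgaddr = kmap_atomic(pg);

        memcpy(pgaddr + offset, src, len);
        kunmap_atomic(pgaddr);
        return len;
}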
index 42198359fa1b472557e44f325e9f55c305237e99..0c6048247a34eb16a5146f7ea67479a21fec6173 100644 (file)
@@ -198,7 +198,8 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
        inode = ACCESS_ONCE(entry->d_inode);
        if (inode && is_bad_inode(inode))
                goto invalid;
-       else if (fuse_dentry_time(entry) < get_jiffies_64()) {
+       else if (time_before64(fuse_dentry_time(entry), get_jiffies_64()) ||
+                (flags & LOOKUP_REVAL)) {
                int err;
                struct fuse_entry_out outarg;
                struct fuse_req *req;
@@ -814,13 +815,6 @@ static int fuse_rename_common(struct inode *olddir, struct dentry *oldent,
        return err;
 }
 
-static int fuse_rename(struct inode *olddir, struct dentry *oldent,
-                      struct inode *newdir, struct dentry *newent)
-{
-       return fuse_rename_common(olddir, oldent, newdir, newent, 0,
-                                 FUSE_RENAME, sizeof(struct fuse_rename_in));
-}
-
 static int fuse_rename2(struct inode *olddir, struct dentry *oldent,
                        struct inode *newdir, struct dentry *newent,
                        unsigned int flags)
@@ -831,17 +825,30 @@ static int fuse_rename2(struct inode *olddir, struct dentry *oldent,
        if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE))
                return -EINVAL;
 
-       if (fc->no_rename2 || fc->minor < 23)
-               return -EINVAL;
+       if (flags) {
+               if (fc->no_rename2 || fc->minor < 23)
+                       return -EINVAL;
 
-       err = fuse_rename_common(olddir, oldent, newdir, newent, flags,
-                                FUSE_RENAME2, sizeof(struct fuse_rename2_in));
-       if (err == -ENOSYS) {
-               fc->no_rename2 = 1;
-               err = -EINVAL;
+               err = fuse_rename_common(olddir, oldent, newdir, newent, flags,
+                                        FUSE_RENAME2,
+                                        sizeof(struct fuse_rename2_in));
+               if (err == -ENOSYS) {
+                       fc->no_rename2 = 1;
+                       err = -EINVAL;
+               }
+       } else {
+               err = fuse_rename_common(olddir, oldent, newdir, newent, 0,
+                                        FUSE_RENAME,
+                                        sizeof(struct fuse_rename_in));
        }
+
        return err;
+}
 
+static int fuse_rename(struct inode *olddir, struct dentry *oldent,
+                      struct inode *newdir, struct dentry *newent)
+{
+       return fuse_rename2(olddir, oldent, newdir, newent, 0);
 }
 
 static int fuse_link(struct dentry *entry, struct inode *newdir,
@@ -985,7 +992,7 @@ int fuse_update_attributes(struct inode *inode, struct kstat *stat,
        int err;
        bool r;
 
-       if (fi->i_time < get_jiffies_64()) {
+       if (time_before64(fi->i_time, get_jiffies_64())) {
                r = true;
                err = fuse_do_getattr(inode, stat, file);
        } else {
@@ -1171,7 +1178,7 @@ static int fuse_permission(struct inode *inode, int mask)
            ((mask & MAY_EXEC) && S_ISREG(inode->i_mode))) {
                struct fuse_inode *fi = get_fuse_inode(inode);
 
-               if (fi->i_time < get_jiffies_64()) {
+               if (time_before64(fi->i_time, get_jiffies_64())) {
                        refreshed = true;
 
                        err = fuse_perm_getattr(inode, mask);
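
The three fuse hunks above replace plain "<" comparisons of 64-bit jiffies timestamps with time_before64(), which stays correct when the counter wraps around. The trick is to compare through a signed difference rather than the raw values; a self-contained sketch of the same idea:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Illustrative re-implementation of the wraparound-safe comparison. */
static bool example_time_before64(uint64_t a, uint64_t b)
{
        return (int64_t)(a - b) < 0;
}

int main(void)
{
        /* "a" is just before the wrap point, "b" just after it. */
        uint64_t a = UINT64_MAX - 5, b = 10;

        assert(example_time_before64(a, b));    /* a really is earlier */
        assert(a > b);                          /* the naive "<" gets it wrong */
        return 0;
}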
index 6e16dad13e9b16de0358f8caaec9833d9f00a84b..40ac2628ddcf46f3be8fe96ed46bb37ee6ef1c62 100644 (file)
@@ -1687,7 +1687,7 @@ static int fuse_writepage_locked(struct page *page)
        error = -EIO;
        req->ff = fuse_write_file_get(fc, fi);
        if (!req->ff)
-               goto err_free;
+               goto err_nofile;
 
        fuse_write_fill(req, req->ff, page_offset(page), 0);
 
@@ -1715,6 +1715,8 @@ static int fuse_writepage_locked(struct page *page)
 
        return 0;
 
+err_nofile:
+       __free_page(tmp_page);
 err_free:
        fuse_request_free(req);
 err:
@@ -1955,8 +1957,8 @@ static int fuse_writepages(struct address_space *mapping,
        data.ff = NULL;
 
        err = -ENOMEM;
-       data.orig_pages = kzalloc(sizeof(struct page *) *
-                                 FUSE_MAX_PAGES_PER_REQ,
+       data.orig_pages = kcalloc(FUSE_MAX_PAGES_PER_REQ,
+                                 sizeof(struct page *),
                                  GFP_NOFS);
        if (!data.orig_pages)
                goto out;
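
The allocation above (and a similar one in the gfs2 diff further down) switches from kzalloc(n * size, ...) to kcalloc(n, size, ...), letting the allocator catch a multiplication overflow instead of silently allocating a short buffer. The equivalent guard in plain C:

#include <stdint.h>
#include <stdlib.h>

/* Illustrative only: a counted, zeroing allocator with an overflow check. */
static void *example_calloc(size_t n, size_t size)
{
        if (size && n > SIZE_MAX / size)
                return NULL;            /* n * size would overflow */
        return calloc(n, size);         /* calloc zeroes the memory, like kzalloc */
}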
index 754dcf23de8abf10ceee81926f022731b810cb54..03246cd9d47a7b27d93ce0c2a122cae89d5fbd4f 100644 (file)
@@ -478,6 +478,17 @@ static const match_table_t tokens = {
        {OPT_ERR,                       NULL}
 };
 
+static int fuse_match_uint(substring_t *s, unsigned int *res)
+{
+       int err = -ENOMEM;
+       char *buf = match_strdup(s);
+       if (buf) {
+               err = kstrtouint(buf, 10, res);
+               kfree(buf);
+       }
+       return err;
+}
+
 static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev)
 {
        char *p;
@@ -488,6 +499,7 @@ static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev)
        while ((p = strsep(&opt, ",")) != NULL) {
                int token;
                int value;
+               unsigned uv;
                substring_t args[MAX_OPT_ARGS];
                if (!*p)
                        continue;
@@ -511,18 +523,18 @@ static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev)
                        break;
 
                case OPT_USER_ID:
-                       if (match_int(&args[0], &value))
+                       if (fuse_match_uint(&args[0], &uv))
                                return 0;
-                       d->user_id = make_kuid(current_user_ns(), value);
+                       d->user_id = make_kuid(current_user_ns(), uv);
                        if (!uid_valid(d->user_id))
                                return 0;
                        d->user_id_present = 1;
                        break;
 
                case OPT_GROUP_ID:
-                       if (match_int(&args[0], &value))
+                       if (fuse_match_uint(&args[0], &uv))
                                return 0;
-                       d->group_id = make_kgid(current_user_ns(), value);
+                       d->group_id = make_kgid(current_user_ns(), uv);
                        if (!gid_valid(d->group_id))
                                return 0;
                        d->group_id_present = 1;
@@ -895,9 +907,6 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
                                fc->writeback_cache = 1;
                        if (arg->time_gran && arg->time_gran <= 1000000000)
                                fc->sb->s_time_gran = arg->time_gran;
-                       else
-                               fc->sb->s_time_gran = 1000000000;
-
                } else {
                        ra_pages = fc->max_read / PAGE_CACHE_SIZE;
                        fc->no_lock = 1;
@@ -926,7 +935,7 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
                FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ |
                FUSE_FLOCK_LOCKS | FUSE_IOCTL_DIR | FUSE_AUTO_INVAL_DATA |
                FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO | FUSE_ASYNC_DIO |
-               FUSE_WRITEBACK_CACHE;
+               FUSE_WRITEBACK_CACHE | FUSE_NO_OPEN_SUPPORT;
        req->in.h.opcode = FUSE_INIT;
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(*arg);
@@ -1006,7 +1015,7 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
 
        sb->s_flags &= ~(MS_NOSEC | MS_I_VERSION);
 
-       if (!parse_fuse_opt((char *) data, &d, is_bdev))
+       if (!parse_fuse_opt(data, &d, is_bdev))
                goto err;
 
        if (is_bdev) {
index 4fc3a3046174dc9a296c90a0d0ca6d53485e277b..26b3f952e6b19cccd2e0333f45498c5531a99c62 100644 (file)
@@ -981,7 +981,7 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl)
        int error = 0;
 
        state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
-       flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT | GL_NOCACHE;
+       flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT;
 
        mutex_lock(&fp->f_fl_mutex);
 
@@ -991,7 +991,7 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl)
                        goto out;
                flock_lock_file_wait(file,
                                     &(struct file_lock){.fl_type = F_UNLCK});
-               gfs2_glock_dq_wait(fl_gh);
+               gfs2_glock_dq(fl_gh);
                gfs2_holder_reinit(state, flags, fl_gh);
        } else {
                error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,
index c355f7320e448bfe30a8c325b2448d707f8c19be..ee4e04fe60fc5edcb9f5416e089a0ac440870ac5 100644 (file)
@@ -731,14 +731,14 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
                cachep = gfs2_glock_aspace_cachep;
        else
                cachep = gfs2_glock_cachep;
-       gl = kmem_cache_alloc(cachep, GFP_KERNEL);
+       gl = kmem_cache_alloc(cachep, GFP_NOFS);
        if (!gl)
                return -ENOMEM;
 
        memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
 
        if (glops->go_flags & GLOF_LVB) {
-               gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_KERNEL);
+               gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_NOFS);
                if (!gl->gl_lksb.sb_lvbptr) {
                        kmem_cache_free(cachep, gl);
                        return -ENOMEM;
@@ -1404,12 +1404,16 @@ __acquires(&lru_lock)
                gl = list_entry(list->next, struct gfs2_glock, gl_lru);
                list_del_init(&gl->gl_lru);
                if (!spin_trylock(&gl->gl_spin)) {
+add_back_to_lru:
                        list_add(&gl->gl_lru, &lru_list);
                        atomic_inc(&lru_count);
                        continue;
                }
+               if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
+                       spin_unlock(&gl->gl_spin);
+                       goto add_back_to_lru;
+               }
                clear_bit(GLF_LRU, &gl->gl_flags);
-               spin_unlock(&lru_lock);
                gl->gl_lockref.count++;
                if (demote_ok(gl))
                        handle_callback(gl, LM_ST_UNLOCKED, 0, false);
@@ -1417,7 +1421,7 @@ __acquires(&lru_lock)
                if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
                        gl->gl_lockref.count--;
                spin_unlock(&gl->gl_spin);
-               spin_lock(&lru_lock);
+               cond_resched_lock(&lru_lock);
        }
 }
 
@@ -1442,7 +1446,7 @@ static long gfs2_scan_glock_lru(int nr)
                gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);
 
                /* Test for being demotable */
-               if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
+               if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
                        list_move(&gl->gl_lru, &dispose);
                        atomic_dec(&lru_count);
                        freed++;
index fc1100781bbc1c954aebfd0d5a0ee317eb707012..2ffc67dce87f268d0b5824414927c6a1bae90513 100644 (file)
@@ -234,8 +234,8 @@ static void inode_go_sync(struct gfs2_glock *gl)
  * inode_go_inval - prepare a inode glock to be released
  * @gl: the glock
  * @flags:
- * 
- * Normally we invlidate everything, but if we are moving into
+ *
+ * Normally we invalidate everything, but if we are moving into
  * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
  * can keep hold of the metadata, since it won't have changed.
  *
index 91f274de1246cabce0f052ede54dea53cf404cfc..4fafea1c9ecf1852832e06dbfe6978ae67d9223a 100644 (file)
@@ -1036,8 +1036,8 @@ static int set_recover_size(struct gfs2_sbd *sdp, struct dlm_slot *slots,
 
        new_size = old_size + RECOVER_SIZE_INC;
 
-       submit = kzalloc(new_size * sizeof(uint32_t), GFP_NOFS);
-       result = kzalloc(new_size * sizeof(uint32_t), GFP_NOFS);
+       submit = kcalloc(new_size, sizeof(uint32_t), GFP_NOFS);
+       result = kcalloc(new_size, sizeof(uint32_t), GFP_NOFS);
        if (!submit || !result) {
                kfree(submit);
                kfree(result);
index db629d1bd1bd92d78b5ab48d07efec22214e0d1f..f4cb9c0d6bbdce4bdfc4a82b20aa7aa2ac119829 100644 (file)
@@ -337,7 +337,7 @@ static bool gfs2_unaligned_extlen(struct gfs2_rbm *rbm, u32 n_unaligned, u32 *le
 
 /**
  * gfs2_free_extlen - Return extent length of free blocks
- * @rbm: Starting position
+ * @rrbm: Starting position
  * @len: Max length to check
  *
  * Starting at the block specified by the rbm, see how many free blocks
@@ -2522,7 +2522,7 @@ void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state)
 
 /**
  * gfs2_rlist_free - free a resource group list
- * @list: the list of resource groups
+ * @rlist: the list of resource groups
  *
  */
 
index 985c6f3684859e17439d62c7d319611305414437..9eb787e5c167fb601845590f0181d249bf515fb0 100644 (file)
@@ -2256,9 +2256,10 @@ done:
                goto out;
        }
        path->dentry = dentry;
-       path->mnt = mntget(nd->path.mnt);
+       path->mnt = nd->path.mnt;
        if (should_follow_link(dentry, nd->flags & LOOKUP_FOLLOW))
                return 1;
+       mntget(path->mnt);
        follow_mount(path);
        error = 0;
 out:
index 8f98138cbc4385ba63b3af77ae907219d22e6991..f11b9eed0de109d057cd86ef42c577400698992c 100644 (file)
@@ -756,7 +756,6 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
        spin_unlock(&dreq->lock);
 
        while (!list_empty(&hdr->pages)) {
-               bool do_destroy = true;
 
                req = nfs_list_entry(hdr->pages.next);
                nfs_list_remove_request(req);
@@ -765,7 +764,6 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
                case NFS_IOHDR_NEED_COMMIT:
                        kref_get(&req->wb_kref);
                        nfs_mark_request_commit(req, hdr->lseg, &cinfo);
-                       do_destroy = false;
                }
                nfs_unlock_and_release_request(req);
        }
index 82ddbf46660e3c1be7d499f2ca014ce619da8603..f415cbf9f6c3f99a208005f39c7ce206ee2b1587 100644 (file)
@@ -244,6 +244,7 @@ void nfs_pgio_data_release(struct nfs_pgio_data *);
 int nfs_generic_pgio(struct nfs_pageio_descriptor *, struct nfs_pgio_header *);
 int nfs_initiate_pgio(struct rpc_clnt *, struct nfs_pgio_data *,
                      const struct rpc_call_ops *, int, int);
+void nfs_free_request(struct nfs_page *req);
 
 static inline void nfs_iocounter_init(struct nfs_io_counter *c)
 {
index 871d6eda8dba1247e882919aff0cc20e9d0e3392..8f854dde4150e1f3dc2ace238d2ddda44e580d63 100644 (file)
@@ -247,3 +247,46 @@ const struct xattr_handler *nfs3_xattr_handlers[] = {
        &posix_acl_default_xattr_handler,
        NULL,
 };
+
+static int
+nfs3_list_one_acl(struct inode *inode, int type, const char *name, void *data,
+               size_t size, ssize_t *result)
+{
+       struct posix_acl *acl;
+       char *p = data + *result;
+
+       acl = get_acl(inode, type);
+       if (!acl)
+               return 0;
+
+       posix_acl_release(acl);
+
+       *result += strlen(name);
+       *result += 1;
+       if (!size)
+               return 0;
+       if (*result > size)
+               return -ERANGE;
+
+       strcpy(p, name);
+       return 0;
+}
+
+ssize_t
+nfs3_listxattr(struct dentry *dentry, char *data, size_t size)
+{
+       struct inode *inode = dentry->d_inode;
+       ssize_t result = 0;
+       int error;
+
+       error = nfs3_list_one_acl(inode, ACL_TYPE_ACCESS,
+                       POSIX_ACL_XATTR_ACCESS, data, size, &result);
+       if (error)
+               return error;
+
+       error = nfs3_list_one_acl(inode, ACL_TYPE_DEFAULT,
+                       POSIX_ACL_XATTR_DEFAULT, data, size, &result);
+       if (error)
+               return error;
+       return result;
+}
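
The new nfs3_listxattr() above follows the usual listxattr contract: when the caller passes size == 0 it only accumulates the required length in *result, otherwise it range-checks and copies the NUL-terminated names. From userspace that contract is typically consumed with the two-call pattern sketched below (error handling trimmed):

#include <stdlib.h>
#include <sys/xattr.h>

/* Illustrative userspace usage of the size==0 convention. */
ssize_t example_list_names(const char *path, char **out)
{
        ssize_t len = listxattr(path, NULL, 0); /* ask for required size */
        char *buf;

        if (len <= 0)
                return len;
        buf = malloc(len);
        if (!buf)
                return -1;
        len = listxattr(path, buf, len);        /* fill the buffer */
        *out = buf;                             /* NUL-separated names */
        return len;
}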
index e7daa42bbc86e888a7ebdc19e25aac30dfd3c3eb..f0afa291fd5883278783f846e6b2770ef69232d8 100644 (file)
@@ -885,7 +885,7 @@ static const struct inode_operations nfs3_dir_inode_operations = {
        .getattr        = nfs_getattr,
        .setattr        = nfs_setattr,
 #ifdef CONFIG_NFS_V3_ACL
-       .listxattr      = generic_listxattr,
+       .listxattr      = nfs3_listxattr,
        .getxattr       = generic_getxattr,
        .setxattr       = generic_setxattr,
        .removexattr    = generic_removexattr,
@@ -899,7 +899,7 @@ static const struct inode_operations nfs3_file_inode_operations = {
        .getattr        = nfs_getattr,
        .setattr        = nfs_setattr,
 #ifdef CONFIG_NFS_V3_ACL
-       .listxattr      = generic_listxattr,
+       .listxattr      = nfs3_listxattr,
        .getxattr       = generic_getxattr,
        .setxattr       = generic_setxattr,
        .removexattr    = generic_removexattr,
index b6ee3a6ee96dd2b06df61a022fadc0841da8d0b4..17fab89f635898ca2e82fae1a8e84bfee05b1a07 100644 (file)
@@ -29,8 +29,6 @@
 static struct kmem_cache *nfs_page_cachep;
 static const struct rpc_call_ops nfs_pgio_common_ops;
 
-static void nfs_free_request(struct nfs_page *);
-
 static bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount)
 {
        p->npages = pagecount;
@@ -239,20 +237,28 @@ nfs_page_group_init(struct nfs_page *req, struct nfs_page *prev)
        WARN_ON_ONCE(prev == req);
 
        if (!prev) {
+               /* a head request */
                req->wb_head = req;
                req->wb_this_page = req;
        } else {
+               /* a subrequest */
                WARN_ON_ONCE(prev->wb_this_page != prev->wb_head);
                WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &prev->wb_head->wb_flags));
                req->wb_head = prev->wb_head;
                req->wb_this_page = prev->wb_this_page;
                prev->wb_this_page = req;
 
+               /* All subrequests take a ref on the head request until
+                * nfs_page_group_destroy is called */
+               kref_get(&req->wb_head->wb_kref);
+
                /* grab extra ref if head request has extra ref from
                 * the write/commit path to handle handoff between write
                 * and commit lists */
-               if (test_bit(PG_INODE_REF, &prev->wb_head->wb_flags))
+               if (test_bit(PG_INODE_REF, &prev->wb_head->wb_flags)) {
+                       set_bit(PG_INODE_REF, &req->wb_flags);
                        kref_get(&req->wb_kref);
+               }
        }
 }
 
@@ -269,6 +275,10 @@ nfs_page_group_destroy(struct kref *kref)
        struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);
        struct nfs_page *tmp, *next;
 
+       /* subrequests must release the ref on the head request */
+       if (req->wb_head != req)
+               nfs_release_request(req->wb_head);
+
        if (!nfs_page_group_sync_on_bit(req, PG_TEARDOWN))
                return;
 
@@ -394,7 +404,7 @@ static void nfs_clear_request(struct nfs_page *req)
  *
  * Note: Should never be called with the spinlock held!
  */
-static void nfs_free_request(struct nfs_page *req)
+void nfs_free_request(struct nfs_page *req)
 {
        WARN_ON_ONCE(req->wb_this_page != req);
 
@@ -925,7 +935,6 @@ static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
                        nfs_pageio_doio(desc);
                        if (desc->pg_error < 0)
                                return 0;
-                       desc->pg_moreio = 0;
                        if (desc->pg_recoalesce)
                                return 0;
                        /* retry add_request for this subreq */
@@ -972,6 +981,7 @@ static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
                desc->pg_count = 0;
                desc->pg_base = 0;
                desc->pg_recoalesce = 0;
+               desc->pg_moreio = 0;
 
                while (!list_empty(&head)) {
                        struct nfs_page *req;
index 98ff061ccaf3650c6069b251e879a5537d6a8d7f..5e2f10304548ee90170aebbc3df2834189033606 100644 (file)
@@ -46,6 +46,7 @@ static const struct rpc_call_ops nfs_commit_ops;
 static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops;
 static const struct nfs_commit_completion_ops nfs_commit_completion_ops;
 static const struct nfs_rw_ops nfs_rw_write_ops;
+static void nfs_clear_request_commit(struct nfs_page *req);
 
 static struct kmem_cache *nfs_wdata_cachep;
 static mempool_t *nfs_wdata_mempool;
@@ -91,8 +92,15 @@ static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error)
        set_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
 }
 
+/*
+ * nfs_page_find_head_request_locked - find head request associated with @page
+ *
+ * must be called while holding the inode lock.
+ *
+ * returns matching head request with reference held, or NULL if not found.
+ */
 static struct nfs_page *
-nfs_page_find_request_locked(struct nfs_inode *nfsi, struct page *page)
+nfs_page_find_head_request_locked(struct nfs_inode *nfsi, struct page *page)
 {
        struct nfs_page *req = NULL;
 
@@ -104,25 +112,33 @@ nfs_page_find_request_locked(struct nfs_inode *nfsi, struct page *page)
                /* Linearly search the commit list for the correct req */
                list_for_each_entry_safe(freq, t, &nfsi->commit_info.list, wb_list) {
                        if (freq->wb_page == page) {
-                               req = freq;
+                               req = freq->wb_head;
                                break;
                        }
                }
        }
 
-       if (req)
+       if (req) {
+               WARN_ON_ONCE(req->wb_head != req);
+
                kref_get(&req->wb_kref);
+       }
 
        return req;
 }
 
-static struct nfs_page *nfs_page_find_request(struct page *page)
+/*
+ * nfs_page_find_head_request - find head request associated with @page
+ *
+ * returns matching head request with reference held, or NULL if not found.
+ */
+static struct nfs_page *nfs_page_find_head_request(struct page *page)
 {
        struct inode *inode = page_file_mapping(page)->host;
        struct nfs_page *req = NULL;
 
        spin_lock(&inode->i_lock);
-       req = nfs_page_find_request_locked(NFS_I(inode), page);
+       req = nfs_page_find_head_request_locked(NFS_I(inode), page);
        spin_unlock(&inode->i_lock);
        return req;
 }
@@ -274,36 +290,246 @@ static void nfs_end_page_writeback(struct nfs_page *req)
                clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC);
 }
 
-static struct nfs_page *nfs_find_and_lock_request(struct page *page, bool nonblock)
+
+/* nfs_page_group_clear_bits
+ *   @req - an nfs request
+ * clears all page group related bits from @req
+ */
+static void
+nfs_page_group_clear_bits(struct nfs_page *req)
+{
+       clear_bit(PG_TEARDOWN, &req->wb_flags);
+       clear_bit(PG_UNLOCKPAGE, &req->wb_flags);
+       clear_bit(PG_UPTODATE, &req->wb_flags);
+       clear_bit(PG_WB_END, &req->wb_flags);
+       clear_bit(PG_REMOVE, &req->wb_flags);
+}
+
+
+/*
+ * nfs_unroll_locks_and_wait -  unlock all newly locked reqs and wait on @req
+ *
+ * this is a helper function for nfs_lock_and_join_requests
+ *
+ * @inode - inode associated with request page group, must be holding inode lock
+ * @head  - head request of page group, must be holding head lock
+ * @req   - request that couldn't lock and needs to wait on the req bit lock
+ * @nonblock - if true, don't actually wait
+ *
+ * NOTE: this must be called holding page_group bit lock and inode spin lock
+ *       and BOTH will be released before returning.
+ *
+ * returns 0 on success, < 0 on error.
+ */
+static int
+nfs_unroll_locks_and_wait(struct inode *inode, struct nfs_page *head,
+                         struct nfs_page *req, bool nonblock)
+       __releases(&inode->i_lock)
+{
+       struct nfs_page *tmp;
+       int ret;
+
+       /* relinquish all the locks successfully grabbed this run */
+       for (tmp = head ; tmp != req; tmp = tmp->wb_this_page)
+               nfs_unlock_request(tmp);
+
+       WARN_ON_ONCE(test_bit(PG_TEARDOWN, &req->wb_flags));
+
+       /* grab a ref on the request that will be waited on */
+       kref_get(&req->wb_kref);
+
+       nfs_page_group_unlock(head);
+       spin_unlock(&inode->i_lock);
+
+       /* release ref from nfs_page_find_head_request_locked */
+       nfs_release_request(head);
+
+       if (!nonblock)
+               ret = nfs_wait_on_request(req);
+       else
+               ret = -EAGAIN;
+       nfs_release_request(req);
+
+       return ret;
+}
+
+/*
+ * nfs_destroy_unlinked_subrequests - destroy recently unlinked subrequests
+ *
+ * @destroy_list - request list (using wb_this_page) terminated by @old_head
+ * @old_head - the old head of the list
+ *
+ * All subrequests must be locked and removed from all lists, so at this point
+ * they are only "active" in this function, and possibly in nfs_wait_on_request
+ * with a reference held by some other context.
+ */
+static void
+nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list,
+                                struct nfs_page *old_head)
+{
+       while (destroy_list) {
+               struct nfs_page *subreq = destroy_list;
+
+               destroy_list = (subreq->wb_this_page == old_head) ?
+                                  NULL : subreq->wb_this_page;
+
+               WARN_ON_ONCE(old_head != subreq->wb_head);
+
+               /* make sure old group is not used */
+               subreq->wb_head = subreq;
+               subreq->wb_this_page = subreq;
+
+               nfs_clear_request_commit(subreq);
+
+               /* subreq is now totally disconnected from page group or any
+                * write / commit lists. last chance to wake any waiters */
+               nfs_unlock_request(subreq);
+
+               if (!test_bit(PG_TEARDOWN, &subreq->wb_flags)) {
+                       /* release ref on old head request */
+                       nfs_release_request(old_head);
+
+                       nfs_page_group_clear_bits(subreq);
+
+                       /* release the PG_INODE_REF reference */
+                       if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags))
+                               nfs_release_request(subreq);
+                       else
+                               WARN_ON_ONCE(1);
+               } else {
+                       WARN_ON_ONCE(test_bit(PG_CLEAN, &subreq->wb_flags));
+                       /* zombie requests have already released the last
+                        * reference and were waiting on the rest of the
+                        * group to complete. Since it's no longer part of a
+                        * group, simply free the request */
+                       nfs_page_group_clear_bits(subreq);
+                       nfs_free_request(subreq);
+               }
+       }
+}
+
+/*
+ * nfs_lock_and_join_requests - join all subreqs to the head req and return
+ *                              a locked reference, cancelling any pending
+ *                              operations for this page.
+ *
+ * @page - the page used to lookup the "page group" of nfs_page structures
+ * @nonblock - if true, don't block waiting for request locks
+ *
+ * This function joins all sub requests to the head request by first
+ * locking all requests in the group, cancelling any pending operations
+ * and finally updating the head request to cover the whole range covered by
+ * the (former) group.  All subrequests are removed from any write or commit
+ * lists, unlinked from the group and destroyed.
+ *
+ * Returns a locked, referenced pointer to the head request - which after
+ * this call is guaranteed to be the only request associated with the page.
+ * Returns NULL if no requests are found for @page, or an ERR_PTR if an
+ * error was encountered.
+ */
+static struct nfs_page *
+nfs_lock_and_join_requests(struct page *page, bool nonblock)
 {
        struct inode *inode = page_file_mapping(page)->host;
-       struct nfs_page *req;
+       struct nfs_page *head, *subreq;
+       struct nfs_page *destroy_list = NULL;
+       unsigned int total_bytes;
        int ret;
 
+try_again:
+       total_bytes = 0;
+
+       WARN_ON_ONCE(destroy_list);
+
        spin_lock(&inode->i_lock);
-       for (;;) {
-               req = nfs_page_find_request_locked(NFS_I(inode), page);
-               if (req == NULL)
-                       break;
-               if (nfs_lock_request(req))
-                       break;
-               /* Note: If we hold the page lock, as is the case in nfs_writepage,
-                *       then the call to nfs_lock_request() will always
-                *       succeed provided that someone hasn't already marked the
-                *       request as dirty (in which case we don't care).
-                */
+
+       /*
+        * A reference is taken only on the head request which acts as a
+        * reference to the whole page group - the group will not be destroyed
+        * until the head reference is released.
+        */
+       head = nfs_page_find_head_request_locked(NFS_I(inode), page);
+
+       if (!head) {
                spin_unlock(&inode->i_lock);
-               if (!nonblock)
-                       ret = nfs_wait_on_request(req);
-               else
-                       ret = -EAGAIN;
-               nfs_release_request(req);
-               if (ret != 0)
+               return NULL;
+       }
+
+       /* lock each request in the page group */
+       nfs_page_group_lock(head);
+       subreq = head;
+       do {
+               /*
+                * Subrequests are always contiguous, non overlapping
+                * and in order. If not, it's a programming error.
+                */
+               WARN_ON_ONCE(subreq->wb_offset !=
+                    (head->wb_offset + total_bytes));
+
+               /* keep track of how many bytes this group covers */
+               total_bytes += subreq->wb_bytes;
+
+               if (!nfs_lock_request(subreq)) {
+                       /* releases page group bit lock and
+                        * inode spin lock and all references */
+                       ret = nfs_unroll_locks_and_wait(inode, head,
+                               subreq, nonblock);
+
+                       if (ret == 0)
+                               goto try_again;
+
                        return ERR_PTR(ret);
-               spin_lock(&inode->i_lock);
+               }
+
+               subreq = subreq->wb_this_page;
+       } while (subreq != head);
+
+       /* Now that all requests are locked, make sure they aren't on any list.
+        * Commit list removal accounting is done after locks are dropped */
+       subreq = head;
+       do {
+               nfs_list_remove_request(subreq);
+               subreq = subreq->wb_this_page;
+       } while (subreq != head);
+
+       /* unlink subrequests from head, destroy them later */
+       if (head->wb_this_page != head) {
+               /* destroy list will be terminated by head */
+               destroy_list = head->wb_this_page;
+               head->wb_this_page = head;
+
+               /* change head request to cover whole range that
+                * the former page group covered */
+               head->wb_bytes = total_bytes;
        }
+
+       /*
+        * prepare head request to be added to new pgio descriptor
+        */
+       nfs_page_group_clear_bits(head);
+
+       /*
+        * some part of the group was still on the inode list - otherwise
+        * the group wouldn't be involved in async write.
+        * grab a reference for the head request, iff it needs one.
+        */
+       if (!test_and_set_bit(PG_INODE_REF, &head->wb_flags))
+               kref_get(&head->wb_kref);
+
+       nfs_page_group_unlock(head);
+
+       /* drop lock to clear_request_commit the head req and clean up
+        * requests on destroy list */
        spin_unlock(&inode->i_lock);
-       return req;
+
+       nfs_destroy_unlinked_subrequests(destroy_list, head);
+
+       /* clean up commit list state */
+       nfs_clear_request_commit(head);
+
+       /* still holds ref on head from nfs_page_find_head_request_locked
+        * and still has lock on head from lock loop */
+       return head;
 }
 
 /*
@@ -316,7 +542,7 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
        struct nfs_page *req;
        int ret = 0;
 
-       req = nfs_find_and_lock_request(page, nonblock);
+       req = nfs_lock_and_join_requests(page, nonblock);
        if (!req)
                goto out;
        ret = PTR_ERR(req);
@@ -448,7 +674,9 @@ static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
                set_page_private(req->wb_page, (unsigned long)req);
        }
        nfsi->npages++;
-       set_bit(PG_INODE_REF, &req->wb_flags);
+       /* this is a head request for a page group - mark it as having an
+        * extra reference so sub groups can follow suit */
+       WARN_ON(test_and_set_bit(PG_INODE_REF, &req->wb_flags));
        kref_get(&req->wb_kref);
        spin_unlock(&inode->i_lock);
 }
@@ -474,7 +702,9 @@ static void nfs_inode_remove_request(struct nfs_page *req)
                nfsi->npages--;
                spin_unlock(&inode->i_lock);
        }
-       nfs_release_request(req);
+
+       if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags))
+               nfs_release_request(req);
 }
 
 static void
@@ -638,7 +868,6 @@ static void nfs_write_completion(struct nfs_pgio_header *hdr)
 {
        struct nfs_commit_info cinfo;
        unsigned long bytes = 0;
-       bool do_destroy;
 
        if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
                goto out;
@@ -668,7 +897,6 @@ remove_req:
 next:
                nfs_unlock_request(req);
                nfs_end_page_writeback(req);
-               do_destroy = !test_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags);
                nfs_release_request(req);
        }
 out:
@@ -769,7 +997,7 @@ static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
        spin_lock(&inode->i_lock);
 
        for (;;) {
-               req = nfs_page_find_request_locked(NFS_I(inode), page);
+               req = nfs_page_find_head_request_locked(NFS_I(inode), page);
                if (req == NULL)
                        goto out_unlock;
 
@@ -877,7 +1105,7 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
         * dropped page.
         */
        do {
-               req = nfs_page_find_request(page);
+               req = nfs_page_find_head_request(page);
                if (req == NULL)
                        return 0;
                l_ctx = req->wb_lock_context;
@@ -1569,27 +1797,28 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page)
        struct nfs_page *req;
        int ret = 0;
 
-       for (;;) {
-               wait_on_page_writeback(page);
-               req = nfs_page_find_request(page);
-               if (req == NULL)
-                       break;
-               if (nfs_lock_request(req)) {
-                       nfs_clear_request_commit(req);
-                       nfs_inode_remove_request(req);
-                       /*
-                        * In case nfs_inode_remove_request has marked the
-                        * page as being dirty
-                        */
-                       cancel_dirty_page(page, PAGE_CACHE_SIZE);
-                       nfs_unlock_and_release_request(req);
-                       break;
-               }
-               ret = nfs_wait_on_request(req);
-               nfs_release_request(req);
-               if (ret < 0)
-                       break;
+       wait_on_page_writeback(page);
+
+       /* blocking call to cancel all requests and join to a single (head)
+        * request */
+       req = nfs_lock_and_join_requests(page, false);
+
+       if (IS_ERR(req)) {
+               ret = PTR_ERR(req);
+       } else if (req) {
+               /* all requests from this page have been cancelled by
+                * nfs_lock_and_join_requests, so just remove the head
+                * request from the inode / page_private pointer and
+                * release it */
+               nfs_inode_remove_request(req);
+               /*
+                * In case nfs_inode_remove_request has marked the
+                * page as being dirty
+                */
+               cancel_dirty_page(page, PAGE_CACHE_SIZE);
+               nfs_unlock_and_release_request(req);
        }
+
        return ret;
 }
 
index b56b1cc0271853b566f83f4157eaa753fd8ad2e3..944275c8f56ddf79ec457f7ad5d916f6ad7c5982 100644 (file)
@@ -2879,6 +2879,7 @@ again:
                 * return the conflicting open:
                 */
                if (conf->len) {
+                       kfree(conf->data);
                        conf->len = 0;
                        conf->data = NULL;
                        goto again;
@@ -2891,6 +2892,7 @@ again:
        if (conf->len) {
                p = xdr_encode_opaque_fixed(p, &ld->ld_clientid, 8);
                p = xdr_encode_opaque(p, conf->data, conf->len);
+               kfree(conf->data);
        }  else {  /* non - nfsv4 lock in conflict, no clientid nor owner */
                p = xdr_encode_hyper(p, (u64)0); /* clientid */
                *p++ = cpu_to_be32(0); /* length of owner name */
@@ -2907,7 +2909,7 @@ nfsd4_encode_lock(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_lo
                nfserr = nfsd4_encode_stateid(xdr, &lock->lk_resp_stateid);
        else if (nfserr == nfserr_denied)
                nfserr = nfsd4_encode_lock_denied(xdr, &lock->lk_denied);
-       kfree(lock->lk_denied.ld_owner.data);
+
        return nfserr;
 }
 
index 9cd5f63715c0ece96c9e92191696997685a2a654..7f30bdc57d13be7a86bb2e06c20c0e78ea3aa056 100644 (file)
@@ -702,6 +702,7 @@ dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
        struct dquot *dquot;
        unsigned long freed = 0;
 
+       spin_lock(&dq_list_lock);
        head = free_dquots.prev;
        while (head != &free_dquots && sc->nr_to_scan) {
                dquot = list_entry(head, struct dquot, dq_free);
@@ -713,6 +714,7 @@ dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
                freed++;
                head = free_dquots.prev;
        }
+       spin_unlock(&dq_list_lock);
        return freed;
 }
 
index 3377dff184042044547d42e9b123944a0ad7e96a..c69e6d43a0d2e863c3e2d6680731371e0d7ef284 100644 (file)
@@ -843,7 +843,7 @@ struct simple_xattr *simple_xattr_alloc(const void *value, size_t size)
 
        /* wrap around? */
        len = sizeof(*new_xattr) + size;
-       if (len <= sizeof(*new_xattr))
+       if (len < sizeof(*new_xattr))
                return NULL;
 
        new_xattr = kmalloc(len, GFP_KERNEL);
index 96175df211b1955f98843d7d251f6e204fa4c2e2..75c3fe5f3d9d82a34c84139c56eb4028dd3f42d0 100644 (file)
@@ -4298,8 +4298,8 @@ xfs_bmapi_delay(
 }
 
 
-int
-__xfs_bmapi_allocate(
+static int
+xfs_bmapi_allocate(
        struct xfs_bmalloca     *bma)
 {
        struct xfs_mount        *mp = bma->ip->i_mount;
@@ -4578,9 +4578,6 @@ xfs_bmapi_write(
        bma.flist = flist;
        bma.firstblock = firstblock;
 
-       if (flags & XFS_BMAPI_STACK_SWITCH)
-               bma.stack_switch = 1;
-
        while (bno < end && n < *nmap) {
                inhole = eof || bma.got.br_startoff > bno;
                wasdelay = !inhole && isnullstartblock(bma.got.br_startblock);
index 38ba36e9b2f0c5616f018c0e5474da7ce9b42290..b879ca56a64ccfab5b2a42502a5b50f68b85f1df 100644 (file)
@@ -77,7 +77,6 @@ typedef       struct xfs_bmap_free
  * from written to unwritten, otherwise convert from unwritten to written.
  */
 #define XFS_BMAPI_CONVERT      0x040
-#define XFS_BMAPI_STACK_SWITCH 0x080
 
 #define XFS_BMAPI_FLAGS \
        { XFS_BMAPI_ENTIRE,     "ENTIRE" }, \
@@ -86,8 +85,7 @@ typedef       struct xfs_bmap_free
        { XFS_BMAPI_PREALLOC,   "PREALLOC" }, \
        { XFS_BMAPI_IGSTATE,    "IGSTATE" }, \
        { XFS_BMAPI_CONTIG,     "CONTIG" }, \
-       { XFS_BMAPI_CONVERT,    "CONVERT" }, \
-       { XFS_BMAPI_STACK_SWITCH, "STACK_SWITCH" }
+       { XFS_BMAPI_CONVERT,    "CONVERT" }
 
 
 static inline int xfs_bmapi_aflag(int w)
index 703b3ec1796cd7ed443a13a4fb1a56bad762bab7..64731ef3324d4b44a938aeac30fc3b816d890222 100644 (file)
@@ -248,59 +248,6 @@ xfs_bmap_rtalloc(
        return 0;
 }
 
-/*
- * Stack switching interfaces for allocation
- */
-static void
-xfs_bmapi_allocate_worker(
-       struct work_struct      *work)
-{
-       struct xfs_bmalloca     *args = container_of(work,
-                                               struct xfs_bmalloca, work);
-       unsigned long           pflags;
-       unsigned long           new_pflags = PF_FSTRANS;
-
-       /*
-        * we are in a transaction context here, but may also be doing work
-        * in kswapd context, and hence we may need to inherit that state
-        * temporarily to ensure that we don't block waiting for memory reclaim
-        * in any way.
-        */
-       if (args->kswapd)
-               new_pflags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
-
-       current_set_flags_nested(&pflags, new_pflags);
-
-       args->result = __xfs_bmapi_allocate(args);
-       complete(args->done);
-
-       current_restore_flags_nested(&pflags, new_pflags);
-}
-
-/*
- * Some allocation requests often come in with little stack to work on. Push
- * them off to a worker thread so there is lots of stack to use. Otherwise just
- * call directly to avoid the context switch overhead here.
- */
-int
-xfs_bmapi_allocate(
-       struct xfs_bmalloca     *args)
-{
-       DECLARE_COMPLETION_ONSTACK(done);
-
-       if (!args->stack_switch)
-               return __xfs_bmapi_allocate(args);
-
-
-       args->done = &done;
-       args->kswapd = current_is_kswapd();
-       INIT_WORK_ONSTACK(&args->work, xfs_bmapi_allocate_worker);
-       queue_work(xfs_alloc_wq, &args->work);
-       wait_for_completion(&done);
-       destroy_work_on_stack(&args->work);
-       return args->result;
-}
-
 /*
  * Check if the endoff is outside the last extent. If so the caller will grow
  * the allocation to a stripe unit boundary.  All offsets are considered outside
index 075f72232a64a92de08665d348dd09a7ad6bd1d4..2fdb72d2c908fc5f962f5beff4166c1b07c69f08 100644 (file)
@@ -55,8 +55,6 @@ struct xfs_bmalloca {
        bool                    userdata;/* set if is user data */
        bool                    aeof;   /* allocated space at eof */
        bool                    conv;   /* overwriting unwritten extents */
-       bool                    stack_switch;
-       bool                    kswapd; /* allocation in kswapd context */
        int                     flags;
        struct completion       *done;
        struct work_struct      work;
@@ -66,8 +64,6 @@ struct xfs_bmalloca {
 int    xfs_bmap_finish(struct xfs_trans **tp, struct xfs_bmap_free *flist,
                        int *committed);
 int    xfs_bmap_rtalloc(struct xfs_bmalloca *ap);
-int    xfs_bmapi_allocate(struct xfs_bmalloca *args);
-int    __xfs_bmapi_allocate(struct xfs_bmalloca *args);
 int    xfs_bmap_eof(struct xfs_inode *ip, xfs_fileoff_t endoff,
                     int whichfork, int *eof);
 int    xfs_bmap_count_blocks(struct xfs_trans *tp, struct xfs_inode *ip,
index bf810c6baf2b8144cd5e28fbcda8cf1162077219..cf893bc1e373a967ba978836310d8d2abf4a0871 100644 (file)
@@ -33,6 +33,7 @@
 #include "xfs_error.h"
 #include "xfs_trace.h"
 #include "xfs_cksum.h"
+#include "xfs_alloc.h"
 
 /*
  * Cursor allocation zone.
@@ -2323,7 +2324,7 @@ error1:
  * record (to be inserted into parent).
  */
 STATIC int                                     /* error */
-xfs_btree_split(
+__xfs_btree_split(
        struct xfs_btree_cur    *cur,
        int                     level,
        union xfs_btree_ptr     *ptrp,
@@ -2503,6 +2504,85 @@ error0:
        return error;
 }
 
+struct xfs_btree_split_args {
+       struct xfs_btree_cur    *cur;
+       int                     level;
+       union xfs_btree_ptr     *ptrp;
+       union xfs_btree_key     *key;
+       struct xfs_btree_cur    **curp;
+       int                     *stat;          /* success/failure */
+       int                     result;
+       bool                    kswapd; /* allocation in kswapd context */
+       struct completion       *done;
+       struct work_struct      work;
+};
+
+/*
+ * Stack switching interfaces for allocation
+ */
+static void
+xfs_btree_split_worker(
+       struct work_struct      *work)
+{
+       struct xfs_btree_split_args     *args = container_of(work,
+                                               struct xfs_btree_split_args, work);
+       unsigned long           pflags;
+       unsigned long           new_pflags = PF_FSTRANS;
+
+       /*
+        * we are in a transaction context here, but may also be doing work
+        * in kswapd context, and hence we may need to inherit that state
+        * temporarily to ensure that we don't block waiting for memory reclaim
+        * in any way.
+        */
+       if (args->kswapd)
+               new_pflags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
+
+       current_set_flags_nested(&pflags, new_pflags);
+
+       args->result = __xfs_btree_split(args->cur, args->level, args->ptrp,
+                                        args->key, args->curp, args->stat);
+       complete(args->done);
+
+       current_restore_flags_nested(&pflags, new_pflags);
+}
+
+/*
+ * BMBT split requests often come in with little stack to work on. Push
+ * them off to a worker thread so there is lots of stack to use. For the other
+ * btree types, just call directly to avoid the context switch overhead here.
+ */
+STATIC int                                     /* error */
+xfs_btree_split(
+       struct xfs_btree_cur    *cur,
+       int                     level,
+       union xfs_btree_ptr     *ptrp,
+       union xfs_btree_key     *key,
+       struct xfs_btree_cur    **curp,
+       int                     *stat)          /* success/failure */
+{
+       struct xfs_btree_split_args     args;
+       DECLARE_COMPLETION_ONSTACK(done);
+
+       if (cur->bc_btnum != XFS_BTNUM_BMAP)
+               return __xfs_btree_split(cur, level, ptrp, key, curp, stat);
+
+       args.cur = cur;
+       args.level = level;
+       args.ptrp = ptrp;
+       args.key = key;
+       args.curp = curp;
+       args.stat = stat;
+       args.done = &done;
+       args.kswapd = current_is_kswapd();
+       INIT_WORK_ONSTACK(&args.work, xfs_btree_split_worker);
+       queue_work(xfs_alloc_wq, &args.work);
+       wait_for_completion(&done);
+       destroy_work_on_stack(&args.work);
+       return args.result;
+}
+
+
 /*
  * Copy the old inode root contents into a real block and make the
  * broot point to it.
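Editor's note: the xfs_btree_split() wrapper added above (like the xfs_bmapi_allocate() wrapper removed earlier in this series) defers a deep call chain to a workqueue so it runs on a worker thread's fresh stack while the caller sleeps on a completion. A hedged kernel-style sketch of the generic pattern, not a drop-in: deep_call() and struct deep_args are hypothetical, system_unbound_wq stands in for a subsystem workqueue such as xfs_alloc_wq, and the workqueue/completion calls are the standard kernel APIs used above.

struct deep_args {
        int                     result;
        struct completion       *done;
        struct work_struct      work;
        /* ... real arguments for deep_call() ... */
};

static void deep_worker(struct work_struct *work)
{
        struct deep_args *args = container_of(work, struct deep_args, work);

        args->result = deep_call(args);         /* runs on the worker's fresh stack */
        complete(args->done);
}

static int deep_call_stack_switched(struct deep_args *args)
{
        DECLARE_COMPLETION_ONSTACK(done);

        args->done = &done;
        INIT_WORK_ONSTACK(&args->work, deep_worker);
        queue_work(system_unbound_wq, &args->work);
        wait_for_completion(&done);             /* caller sleeps, not spins */
        destroy_work_on_stack(&args->work);
        return args->result;
}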
index 6c5eb4c551e3f562e1aba435ceb9a0df438b9e08..6d3ec2b6ee294c7ec38e28fd32376162276f1005 100644 (file)
@@ -749,8 +749,7 @@ xfs_iomap_write_allocate(
                         * pointer that the caller gave to us.
                         */
                        error = xfs_bmapi_write(tp, ip, map_start_fsb,
-                                               count_fsb,
-                                               XFS_BMAPI_STACK_SWITCH,
+                                               count_fsb, 0,
                                                &first_block, 1,
                                                imap, &nimaps, &free_list);
                        if (error)
index c3453b11f5636d228cb5ceb433aa8c67e89c7709..7703fa6770ff77bab6faa57e2aeaa5eedb925b33 100644 (file)
@@ -483,10 +483,16 @@ xfs_sb_quota_to_disk(
        }
 
        /*
-        * GQUOTINO and PQUOTINO cannot be used together in versions
-        * of superblock that do not have pquotino. from->sb_flags
-        * tells us which quota is active and should be copied to
-        * disk.
+        * GQUOTINO and PQUOTINO cannot be used together in versions of
+        * superblock that do not have pquotino. from->sb_flags tells us which
+        * quota is active and should be copied to disk. If neither are active,
+        * make sure we write NULLFSINO to the sb_gquotino field as a quota
+        * inode value of "0" is invalid when the XFS_SB_VERSION_QUOTA feature
+        * bit is set.
+        *
+        * Note that we don't need to handle the sb_uquotino or sb_pquotino here
+        * as they do not require any translation. Hence the main sb field loop
+        * will write them appropriately from the in-core superblock.
         */
        if ((*fields & XFS_SB_GQUOTINO) &&
                                (from->sb_qflags & XFS_GQUOTA_ACCT))
@@ -494,6 +500,17 @@ xfs_sb_quota_to_disk(
        else if ((*fields & XFS_SB_PQUOTINO) &&
                                (from->sb_qflags & XFS_PQUOTA_ACCT))
                to->sb_gquotino = cpu_to_be64(from->sb_pquotino);
+       else {
+               /*
+                * We can't rely on just the fields being logged to tell us
+                * that it is safe to write NULLFSINO - we should only do that
+                * if quotas are not actually enabled. Hence only write
+                * NULLFSINO if both in-core quota inodes are NULL.
+                */
+               if (from->sb_gquotino == NULLFSINO &&
+                   from->sb_pquotino == NULLFSINO)
+                       to->sb_gquotino = cpu_to_be64(NULLFSINO);
+       }
 
        *fields &= ~(XFS_SB_PQUOTINO | XFS_SB_GQUOTINO);
 }
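Editor's note: the logic above chooses what to put in the shared on-disk sb_gquotino slot of a pre-pquotino superblock. A simplified userspace sketch of that decision, with hypothetical constants and with the logged-fields mask check omitted for brevity:

#include <stdint.h>
#include <stdio.h>

#define NULLFSINO   ((uint64_t)-1)
#define GQUOTA_ACCT 0x1u
#define PQUOTA_ACCT 0x2u

static uint64_t pick_disk_gquotino(unsigned int qflags, uint64_t gquotino,
                                   uint64_t pquotino, uint64_t current_disk)
{
        if (qflags & GQUOTA_ACCT)
                return gquotino;
        if (qflags & PQUOTA_ACCT)
                return pquotino;
        /* Neither active: only safe to write NULLFSINO if both are unset. */
        if (gquotino == NULLFSINO && pquotino == NULLFSINO)
                return NULLFSINO;
        return current_disk;    /* otherwise leave the on-disk value alone */
}

int main(void)
{
        printf("%llx\n", (unsigned long long)
               pick_disk_gquotino(GQUOTA_ACCT, 128, NULLFSINO, 0));
        return 0;
}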
index e0720a569b2c5e600f26599916e5ac33243b66a1..bcfd808b1098e81e410310e33582d814274188b8 100644 (file)
@@ -315,12 +315,19 @@ struct acpi_device_wakeup_flags {
        u8 notifier_present:1;  /* Wake-up notify handler has been installed */
 };
 
+struct acpi_device_wakeup_context {
+       struct work_struct work;
+       struct device *dev;
+};
+
 struct acpi_device_wakeup {
        acpi_handle gpe_device;
        u64 gpe_number;
        u64 sleep_state;
        struct list_head resources;
        struct acpi_device_wakeup_flags flags;
+       struct acpi_device_wakeup_context context;
+       struct wakeup_source *ws;
        int prepare_count;
 };
 
@@ -481,6 +488,8 @@ struct acpi_bus_type {
 };
 int register_acpi_bus_type(struct acpi_bus_type *);
 int unregister_acpi_bus_type(struct acpi_bus_type *);
+int acpi_bind_one(struct device *dev, struct acpi_device *adev);
+int acpi_unbind_one(struct device *dev);
 
 struct acpi_pci_root {
        struct acpi_device * device;
@@ -504,20 +513,18 @@ int acpi_enable_wakeup_device_power(struct acpi_device *dev, int state);
 int acpi_disable_wakeup_device_power(struct acpi_device *dev);
 
 #ifdef CONFIG_PM
-acpi_status acpi_add_pm_notifier(struct acpi_device *adev,
-                                acpi_notify_handler handler, void *context);
-acpi_status acpi_remove_pm_notifier(struct acpi_device *adev,
-                                   acpi_notify_handler handler);
+acpi_status acpi_add_pm_notifier(struct acpi_device *adev, struct device *dev,
+                                void (*work_func)(struct work_struct *work));
+acpi_status acpi_remove_pm_notifier(struct acpi_device *adev);
 int acpi_pm_device_sleep_state(struct device *, int *, int);
 #else
 static inline acpi_status acpi_add_pm_notifier(struct acpi_device *adev,
-                                              acpi_notify_handler handler,
-                                              void *context)
+                                              struct device *dev,
+                                              void (*work_func)(struct work_struct *work))
 {
        return AE_SUPPORT;
 }
-static inline acpi_status acpi_remove_pm_notifier(struct acpi_device *adev,
-                                                 acpi_notify_handler handler)
+static inline acpi_status acpi_remove_pm_notifier(struct acpi_device *adev)
 {
        return AE_SUPPORT;
 }
@@ -532,13 +539,8 @@ static inline int acpi_pm_device_sleep_state(struct device *d, int *p, int m)
 #endif
 
 #ifdef CONFIG_PM_RUNTIME
-int __acpi_device_run_wake(struct acpi_device *, bool);
 int acpi_pm_device_run_wake(struct device *, bool);
 #else
-static inline int __acpi_device_run_wake(struct acpi_device *adev, bool en)
-{
-       return -ENODEV;
-}
 static inline int acpi_pm_device_run_wake(struct device *dev, bool enable)
 {
        return -ENODEV;
@@ -546,14 +548,8 @@ static inline int acpi_pm_device_run_wake(struct device *dev, bool enable)
 #endif
 
 #ifdef CONFIG_PM_SLEEP
-int __acpi_device_sleep_wake(struct acpi_device *, u32, bool);
 int acpi_pm_device_sleep_wake(struct device *, bool);
 #else
-static inline int __acpi_device_sleep_wake(struct acpi_device *adev,
-                                          u32 target_state, bool enable)
-{
-       return -ENODEV;
-}
 static inline int acpi_pm_device_sleep_wake(struct device *dev, bool enable)
 {
        return -ENODEV;
index f6f5f8af211245571f2c2bd38415afe7e988eb94..03b3e6d405ffdb7b538cab4d967040fedde4fbef 100644 (file)
@@ -399,4 +399,35 @@ char *acpi_os_get_next_filename(void *dir_handle);
 void acpi_os_close_directory(void *dir_handle);
 #endif
 
+/*
+ * File I/O and related support
+ */
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_open_file
+ACPI_FILE acpi_os_open_file(const char *path, u8 modes);
+#endif
+
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_close_file
+void acpi_os_close_file(ACPI_FILE file);
+#endif
+
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_read_file
+int
+acpi_os_read_file(ACPI_FILE file,
+                 void *buffer, acpi_size size, acpi_size count);
+#endif
+
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_write_file
+int
+acpi_os_write_file(ACPI_FILE file,
+                  void *buffer, acpi_size size, acpi_size count);
+#endif
+
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_file_offset
+long acpi_os_get_file_offset(ACPI_FILE file);
+#endif
+
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_set_file_offset
+acpi_status acpi_os_set_file_offset(ACPI_FILE file, long offset, u8 from);
+#endif
+
 #endif                         /* __ACPIOSXF_H__ */
index 35b525c197114292423cbd76dd030e90a115e91d..c3f38bc459e1d155cfc6cdc5db7df5d92984c23c 100644 (file)
 
 /* Current ACPICA subsystem version in YYYYMMDD format */
 
-#define ACPI_CA_VERSION                 0x20140424
+#define ACPI_CA_VERSION                 0x20140627
 
 #include <acpi/acconfig.h>
 #include <acpi/actypes.h>
 #include <acpi/actbl.h>
 #include <acpi/acbuffer.h>
 
-extern u8 acpi_gbl_permanent_mmap;
-
 /*****************************************************************************
  *
  * Macros used for ACPICA globals and configuration
@@ -335,6 +333,23 @@ ACPI_GLOBAL(u8, acpi_gbl_system_awake_and_running);
 
 #endif                         /* ACPI_DEBUG_OUTPUT */
 
+/*
+ * Application prototypes
+ *
+ * All interfaces used by application will be configured
+ * out of the ACPICA build unless the ACPI_APPLICATION
+ * flag is defined.
+ */
+#ifdef ACPI_APPLICATION
+#define ACPI_APP_DEPENDENT_RETURN_VOID(prototype) \
+       prototype;
+
+#else
+#define ACPI_APP_DEPENDENT_RETURN_VOID(prototype) \
+       static ACPI_INLINE prototype {return;}
+
+#endif                         /* ACPI_APPLICATION */
+
 /*****************************************************************************
  *
  * ACPICA public interface prototypes
@@ -657,6 +672,10 @@ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
                                acpi_finish_gpe(acpi_handle gpe_device,
                                                u32 gpe_number))
 
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+                               acpi_mark_gpe_for_wake(acpi_handle gpe_device,
+                                                      u32 gpe_number))
+
 ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
                                acpi_setup_gpe_for_wake(acpi_handle
                                                        parent_device,
@@ -861,21 +880,32 @@ ACPI_DBG_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(6)
                                                     const char *module_name,
                                                     u32 component_id,
                                                     const char *format, ...))
+ACPI_APP_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(1)
+                               void ACPI_INTERNAL_VAR_XFACE
+                               acpi_log_error(const char *format, ...))
 
 /*
  * Divergences
  */
-acpi_status acpi_get_id(acpi_handle object, acpi_owner_id * out_type);
+ACPI_GLOBAL(u8, acpi_gbl_permanent_mmap);
 
-acpi_status acpi_unload_table_id(acpi_owner_id id);
+ACPI_EXTERNAL_RETURN_STATUS(acpi_status
+                           acpi_get_id(acpi_handle object,
+                                       acpi_owner_id * out_type))
 
-acpi_status
-acpi_get_table_with_size(acpi_string signature,
-              u32 instance, struct acpi_table_header **out_table,
-              acpi_size *tbl_size);
+ACPI_EXTERNAL_RETURN_STATUS(acpi_status acpi_unload_table_id(acpi_owner_id id))
 
-acpi_status
-acpi_get_data_full(acpi_handle object, acpi_object_handler handler, void **data,
-                  void (*callback)(void *));
+ACPI_EXTERNAL_RETURN_STATUS(acpi_status
+                           acpi_get_table_with_size(acpi_string signature,
+                                                    u32 instance,
+                                                    struct acpi_table_header
+                                                    **out_table,
+                                                    acpi_size *tbl_size))
+
+ACPI_EXTERNAL_RETURN_STATUS(acpi_status
+                           acpi_get_data_full(acpi_handle object,
+                                              acpi_object_handler handler,
+                                              void **data,
+                                              void (*callback)(void *)))
 
 #endif                         /* __ACXFACE_H__ */
index 4ad7da8051802a835acc9bd9ab6c8c27516a57a8..9613e8e9796040473d8e28ddf654a815d44fbcfe 100644 (file)
@@ -604,7 +604,7 @@ struct acpi_hest_generic {
 
 /* Generic Error Status block */
 
-struct acpi_generic_status {
+struct acpi_hest_generic_status {
        u32 block_status;
        u32 raw_data_offset;
        u32 raw_data_length;
@@ -614,15 +614,15 @@ struct acpi_generic_status {
 
 /* Values for block_status flags above */
 
-#define ACPI_GEN_ERR_UC                        BIT(0)
-#define ACPI_GEN_ERR_CE                        BIT(1)
-#define ACPI_GEN_ERR_MULTI_UC          BIT(2)
-#define ACPI_GEN_ERR_MULTI_CE          BIT(3)
-#define ACPI_GEN_ERR_COUNT_SHIFT       (0xFF<<4) /* 8 bits, error count */
+#define ACPI_HEST_UNCORRECTABLE             (1)
+#define ACPI_HEST_CORRECTABLE               (1<<1)
+#define ACPI_HEST_MULTIPLE_UNCORRECTABLE    (1<<2)
+#define ACPI_HEST_MULTIPLE_CORRECTABLE      (1<<3)
+#define ACPI_HEST_ERROR_ENTRY_COUNT         (0xFF<<4)  /* 8 bits, error count */
 
 /* Generic Error Data entry */
 
-struct acpi_generic_data {
+struct acpi_hest_generic_data {
        u8 section_type[16];
        u32 error_severity;
        u16 revision;
index 860e5c883eb3cddc9b876a11437b97894d582fee..21314d37cb079415f0b900bb63cfc5d2e224d3bc 100644 (file)
@@ -516,7 +516,7 @@ struct acpi_dmar_andd {
        struct acpi_dmar_header header;
        u8 reserved[3];
        u8 device_number;
-       u8 object_name[];
+       char object_name[1];
 };
 
 /*******************************************************************************
index 19b26bb69a70b6025d305048b48fb7547a51ea2c..608a04019372e56f59aeab4a323abe216bfce875 100644 (file)
 typedef unsigned char u8;
 typedef unsigned char u8;
 typedef unsigned short u16;
+typedef short s16;
 typedef COMPILER_DEPENDENT_UINT64 u64;
 typedef COMPILER_DEPENDENT_INT64 s64;
 
@@ -1244,4 +1245,17 @@ struct acpi_memory_list {
 #define ACPI_OSI_WIN_7                  0x0B
 #define ACPI_OSI_WIN_8                  0x0C
 
+/* Definitions of file IO */
+
+#define ACPI_FILE_READING               0x01
+#define ACPI_FILE_WRITING               0x02
+#define ACPI_FILE_BINARY                0x04
+
+#define ACPI_FILE_BEGIN                 0x01
+#define ACPI_FILE_END                   0x02
+
+/* Definitions of getopt */
+
+#define ACPI_OPT_END                    -1
+
 #endif                         /* __ACTYPES_H__ */
index dfd60d0bfd2783ec8d93e53c9134c48d5b690bde..720446cb243e81f07a02a8763eea0994ce43e772 100644 (file)
@@ -14,7 +14,7 @@
 
 struct ghes {
        struct acpi_hest_generic *generic;
-       struct acpi_generic_status *estatus;
+       struct acpi_hest_generic_status *estatus;
        u64 buffer_paddr;
        unsigned long flags;
        union {
index e863dd5c4e0417411754910c334ee870d8bd8ad7..5f8cc1fa3278b071c2f733e8f62b44cd10fac2bb 100644 (file)
 #define ACPI_DBG_TRACK_ALLOCATIONS
 #endif
 
-/* acpi_names configuration. Single threaded with debugger output enabled. */
-
-#ifdef ACPI_NAMES_APP
-#define ACPI_DEBUGGER
-#define ACPI_APPLICATION
-#define ACPI_SINGLE_THREADED
-#endif
-
 /*
- * acpi_bin/acpi_dump/acpi_src/acpi_xtract/Example configuration. All single
- * threaded, with no debug output.
+ * acpi_bin/acpi_dump/acpi_help/acpi_names/acpi_src/acpi_xtract/Example configuration.
+ * All single threaded.
  */
 #if (defined ACPI_BIN_APP)      || \
        (defined ACPI_DUMP_APP)     || \
+       (defined ACPI_HELP_APP)     || \
+       (defined ACPI_NAMES_APP)    || \
        (defined ACPI_SRC_APP)      || \
        (defined ACPI_XTRACT_APP)   || \
        (defined ACPI_EXAMPLE_APP)
 #define ACPI_SINGLE_THREADED
 #endif
 
+/* acpi_help configuration. Error messages disabled. */
+
 #ifdef ACPI_HELP_APP
-#define ACPI_APPLICATION
-#define ACPI_SINGLE_THREADED
 #define ACPI_NO_ERROR_MESSAGES
 #endif
 
+/* acpi_names configuration. Debug output enabled. */
+
+#ifdef ACPI_NAMES_APP
+#define ACPI_DEBUG_OUTPUT
+#endif
+
+/* acpi_exec/acpi_names/Example configuration. Native RSDP used. */
+
+#if (defined ACPI_EXEC_APP)     || \
+       (defined ACPI_EXAMPLE_APP)  || \
+       (defined ACPI_NAMES_APP)
+#define ACPI_USE_NATIVE_RSDP_POINTER
+#endif
+
+/* acpi_dump configuration. Native mapping used if provided by OSPMs */

+
+#ifdef ACPI_DUMP_APP
+#define ACPI_USE_NATIVE_MEMORY_MAPPING
+#define USE_NATIVE_ALLOCATE_ZEROED
+#endif
+
+/* acpi_names/Example configuration. Hardware disabled */
+
+#if (defined ACPI_EXAMPLE_APP)  || \
+       (defined ACPI_NAMES_APP)
+#define ACPI_REDUCED_HARDWARE 1
+#endif
+
 /* Linkable ACPICA library */
 
 #ifdef ACPI_LIBRARY
 #elif defined(_AED_EFI)
 #include "acefi.h"
 
+#elif defined(_GNU_EFI)
+#include "acefi.h"
+
 #elif defined(__HAIKU__)
 #include "achaiku.h"
 
@@ -399,8 +424,12 @@ typedef char *va_list;
 #ifdef ACPI_APPLICATION
 #include <stdio.h>
 #define ACPI_FILE              FILE *
+#define ACPI_FILE_OUT          stdout
+#define ACPI_FILE_ERR          stderr
 #else
 #define ACPI_FILE              void *
+#define ACPI_FILE_OUT          NULL
+#define ACPI_FILE_ERR          NULL
 #endif                         /* ACPI_APPLICATION */
 #endif                         /* ACPI_FILE */
 
index cd1f052d55bb439175c5eb9624c38891b6cf3a0a..1ba7c190c2ccbfc22ca9f576a0f7faf9ef96d979 100644 (file)
 #ifndef __ACLINUX_H__
 #define __ACLINUX_H__
 
+#ifdef __KERNEL__
+
+/* ACPICA external files should not include ACPICA headers directly. */
+
+#if !defined(BUILDING_ACPICA) && !defined(_LINUX_ACPI_H)
+#error "Please don't include <acpi/acpi.h> directly, include <linux/acpi.h> instead."
+#endif
+
+#endif
+
 /* Common (in-kernel/user-space) ACPICA configuration */
 
 #define ACPI_USE_SYSTEM_CLIBRARY
@@ -70,7 +80,9 @@
 #ifdef EXPORT_ACPI_INTERFACES
 #include <linux/export.h>
 #endif
+#ifdef CONFIG_ACPI
 #include <asm/acenv.h>
+#endif
 
 #ifndef CONFIG_ACPI
 
index 191e741cfa0ed017bd0ec845152e814a15c0ba08..568d4b886712ddd8aae8ba244878d1ca38e30b65 100644 (file)
 
 #ifdef __KERNEL__
 
+#ifndef ACPI_USE_NATIVE_DIVIDE
+
+#ifndef ACPI_DIV_64_BY_32
+#define ACPI_DIV_64_BY_32(n_hi, n_lo, d32, q32, r32) \
+       do { \
+               u64 (__n) = ((u64) n_hi) << 32 | (n_lo); \
+               (r32) = do_div ((__n), (d32)); \
+               (q32) = (u32) (__n); \
+       } while (0)
+#endif
+
+#ifndef ACPI_SHIFT_RIGHT_64
+#define ACPI_SHIFT_RIGHT_64(n_hi, n_lo) \
+       do { \
+               (n_lo) >>= 1; \
+               (n_lo) |= (((n_hi) & 1) << 31); \
+               (n_hi) >>= 1; \
+       } while (0)
+#endif
+
+#endif
+
 /*
  * Overrides for in-kernel ACPICA
  */
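Editor's note: ACPI_DIV_64_BY_32 above splits a 64-bit numerator into 32-bit halves and divides by a 32-bit divisor via do_div(), returning a truncated 32-bit quotient and the remainder. A runnable userspace sketch of the same contract using plain C arithmetic:

#include <stdint.h>
#include <stdio.h>

static void div_64_by_32(uint32_t n_hi, uint32_t n_lo, uint32_t d32,
                         uint32_t *q32, uint32_t *r32)
{
        uint64_t n = ((uint64_t)n_hi << 32) | n_lo;

        *r32 = (uint32_t)(n % d32);
        *q32 = (uint32_t)(n / d32);     /* truncated to 32 bits, as in the macro */
}

int main(void)
{
        uint32_t q, r;

        div_64_by_32(0x1, 0x00000000, 10, &q, &r);  /* 2^32 / 10 */
        printf("q=%u r=%u\n", q, r);                /* q=429496729 r=6 */
        return 0;
}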
index 358c01b971db937c3f0da1ab4326d5fcab44d191..5320153c311ba1e5e5a4765f84d9f264351cc365 100644 (file)
 #include <linux/ioport.h>      /* for struct resource */
 #include <linux/device.h>
 
-#ifdef CONFIG_ACPI
-
 #ifndef _LINUX
 #define _LINUX
 #endif
+#include <acpi/acpi.h>
+
+#ifdef CONFIG_ACPI
 
 #include <linux/list.h>
 #include <linux/mod_devicetable.h>
 #include <linux/dynamic_debug.h>
 
-#include <acpi/acpi.h>
 #include <acpi/acpi_bus.h>
 #include <acpi/acpi_drivers.h>
 #include <acpi/acpi_numa.h>
index ec4112d257bca140886bdadea4de5a8ef468c080..8f8ae95c6e279fed83180d319a26d725f628dbd2 100644 (file)
@@ -482,8 +482,8 @@ extern struct cpufreq_governor cpufreq_gov_conservative;
  *********************************************************************/
 
 /* Special Values of .frequency field */
-#define CPUFREQ_ENTRY_INVALID  ~0
-#define CPUFREQ_TABLE_END      ~1
+#define CPUFREQ_ENTRY_INVALID  ~0u
+#define CPUFREQ_TABLE_END      ~1u
 /* Special Values of .flags field */
 #define CPUFREQ_BOOST_FREQ     (1 << 0)
 
index 5ab4e3a76721760e4d5a70f88de282a687858990..92abb497ab14bb50142652ead5dcf3f630556e3f 100644 (file)
@@ -593,6 +593,7 @@ struct ata_host {
        struct device           *dev;
        void __iomem * const    *iomap;
        unsigned int            n_ports;
+       unsigned int            n_tags;                 /* nr of NCQ tags */
        void                    *private_data;
        struct ata_port_operations *ops;
        unsigned long           flags;
index b12f4bbd064ce891c0f844b4d5180710ff613b0d..35b51e7af88659f5c1f4e11ff84bc80c3420f6a1 100644 (file)
@@ -578,8 +578,6 @@ struct mlx4_cq {
        u32                     cons_index;
 
        u16                     irq;
-       bool                    irq_affinity_change;
-
        __be32                 *set_ci_db;
        __be32                 *arm_db;
        int                     arm_sn;
@@ -1167,6 +1165,8 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
                   int *vector);
 void mlx4_release_eq(struct mlx4_dev *dev, int vec);
 
+int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec);
+
 int mlx4_get_phys_port_id(struct mlx4_dev *dev);
 int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port);
 int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port);
index 11692dea18aa25a9410ce842cdd851a4d15560c7..42aa9b9ecd5f8ded624389a476952c7b5ae9a2a8 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/lockdep.h>
 #include <linux/atomic.h>
 #include <asm/processor.h>
+#include <linux/osq_lock.h>
 
 /*
  * Simple, straightforward mutexes with strict semantics:
@@ -46,7 +47,6 @@
  * - detects multi-task circular deadlocks and prints out all affected
  *   locks and tasks (and only those tasks)
  */
-struct optimistic_spin_queue;
 struct mutex {
        /* 1: unlocked, 0: locked, negative: locked, possible waiters */
        atomic_t                count;
@@ -56,7 +56,7 @@ struct mutex {
        struct task_struct      *owner;
 #endif
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
-       struct optimistic_spin_queue    *osq;   /* Spinner MCS lock */
+       struct optimistic_spin_queue osq; /* Spinner MCS lock */
 #endif
 #ifdef CONFIG_DEBUG_MUTEXES
        const char              *name;
index a70c9493d55a4c01976b093b965cc91b270aaf92..d449018d07265200f4d8d6eeaf8ddac1a9010ebd 100644 (file)
@@ -25,9 +25,6 @@ struct phy_device *of_phy_attach(struct net_device *dev,
 
 extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np);
 
-extern void of_mdiobus_link_phydev(struct mii_bus *mdio,
-                                  struct phy_device *phydev);
-
 #else /* CONFIG_OF */
 static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
 {
@@ -63,11 +60,6 @@ static inline struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np)
 {
        return NULL;
 }
-
-static inline void of_mdiobus_link_phydev(struct mii_bus *mdio,
-                                         struct phy_device *phydev)
-{
-}
 #endif /* CONFIG_OF */
 
 #if defined(CONFIG_OF) && defined(CONFIG_FIXED_PHY)
diff --git a/include/linux/osq_lock.h b/include/linux/osq_lock.h
new file mode 100644 (file)
index 0000000..90230d5
--- /dev/null
@@ -0,0 +1,27 @@
+#ifndef __LINUX_OSQ_LOCK_H
+#define __LINUX_OSQ_LOCK_H
+
+/*
+ * An MCS like lock especially tailored for optimistic spinning for sleeping
+ * lock implementations (mutex, rwsem, etc).
+ */
+
+#define OSQ_UNLOCKED_VAL (0)
+
+struct optimistic_spin_queue {
+       /*
+        * Stores an encoded value of the CPU # of the tail node in the queue.
+        * If the queue is empty, then it's set to OSQ_UNLOCKED_VAL.
+        */
+       atomic_t tail;
+};
+
+/* Init macro and function. */
+#define OSQ_LOCK_UNLOCKED { ATOMIC_INIT(OSQ_UNLOCKED_VAL) }
+
+static inline void osq_lock_init(struct optimistic_spin_queue *lock)
+{
+       atomic_set(&lock->tail, OSQ_UNLOCKED_VAL);
+}
+
+#endif
index 0a97b583ee8d12ae696cd3d33a13ee17c62b518f..e1474ae18c8847cba4a2f17c396c6e7b59167fed 100644 (file)
@@ -398,6 +398,18 @@ static inline struct page *read_mapping_page(struct address_space *mapping,
        return read_cache_page(mapping, index, filler, data);
 }
 
+/*
+ * Get the offset in PAGE_SIZE.
+ * (TODO: hugepage should have ->index in PAGE_SIZE)
+ */
+static inline pgoff_t page_to_pgoff(struct page *page)
+{
+       if (unlikely(PageHeadHuge(page)))
+               return page->index << compound_order(page);
+       else
+               return page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+}
+
 /*
  * Return byte-offset into filesystem object for page.
  */
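Editor's note: page_to_pgoff() above normalises a page's ->index to small-page units; a hugetlb head page keeps its index in huge-page units, so it has to be scaled by the compound order. A userspace arithmetic sketch, with an order of 9 assumed for 2 MiB pages on x86 (both values illustrative):

#include <stdio.h>

int main(void)
{
        unsigned long index = 3;            /* third huge page in the mapping */
        unsigned int compound_order = 9;    /* 2 MiB / 4 KiB = 512 = 1 << 9 */

        unsigned long huge_pgoff = index << compound_order;  /* 1536 small pages in */
        unsigned long small_pgoff = index;  /* regular page: index already in 4 KiB units */

        printf("huge=%lu small=%lu\n", huge_pgoff, small_pgoff);
        return 0;
}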
index 637a608ded0b0091503db1ac4aaa04cf7155823c..64dacb7288a6fc79e86ca536753f342a37252949 100644 (file)
 #include <linux/acpi.h>
 
 #ifdef CONFIG_ACPI
-extern acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev,
-                                                struct pci_bus *pci_bus);
-extern acpi_status pci_acpi_remove_bus_pm_notifier(struct acpi_device *dev);
+extern acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev);
+static inline acpi_status pci_acpi_remove_bus_pm_notifier(struct acpi_device *dev)
+{
+       return acpi_remove_pm_notifier(dev);
+}
 extern acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev,
                                             struct pci_dev *pci_dev);
-extern acpi_status pci_acpi_remove_pm_notifier(struct acpi_device *dev);
+static inline acpi_status pci_acpi_remove_pm_notifier(struct acpi_device *dev)
+{
+       return acpi_remove_pm_notifier(dev);
+}
 extern phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle);
 
 static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev)
index 43fd6716f66207bc824b94f1fdbfb44bc37d3534..367f49b9a1c93e5f48ede4b0b97d09ce4f73ce3e 100644 (file)
 #define RPM_AUTO               0x08    /* Use autosuspend_delay */
 
 #ifdef CONFIG_PM
+extern struct workqueue_struct *pm_wq;
+
+static inline bool queue_pm_work(struct work_struct *work)
+{
+       return queue_work(pm_wq, work);
+}
+
 extern int pm_generic_runtime_suspend(struct device *dev);
 extern int pm_generic_runtime_resume(struct device *dev);
 extern int pm_runtime_force_suspend(struct device *dev);
 extern int pm_runtime_force_resume(struct device *dev);
 #else
+static inline bool queue_pm_work(struct work_struct *work) { return false; }
+
 static inline int pm_generic_runtime_suspend(struct device *dev) { return 0; }
 static inline int pm_generic_runtime_resume(struct device *dev) { return 0; }
 static inline int pm_runtime_force_suspend(struct device *dev) { return 0; }
@@ -37,8 +46,6 @@ static inline int pm_runtime_force_resume(struct device *dev) { return 0; }
 
 #ifdef CONFIG_PM_RUNTIME
 
-extern struct workqueue_struct *pm_wq;
-
 extern int __pm_runtime_idle(struct device *dev, int rpmflags);
 extern int __pm_runtime_suspend(struct device *dev, int rpmflags);
 extern int __pm_runtime_resume(struct device *dev, int rpmflags);
index 5a75d19aa661e1f01f62c76eea3f314aa688e806..6a94cc8b1ca0872fe1fdb1216ea28526ddb5cd57 100644 (file)
@@ -44,7 +44,6 @@
 #include <linux/debugobjects.h>
 #include <linux/bug.h>
 #include <linux/compiler.h>
-#include <linux/percpu.h>
 #include <asm/barrier.h>
 
 extern int rcu_expedited; /* for sysctl */
@@ -299,41 +298,6 @@ static inline void rcu_user_hooks_switch(struct task_struct *prev,
 bool __rcu_is_watching(void);
 #endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */
 
-/*
- * Hooks for cond_resched() and friends to avoid RCU CPU stall warnings.
- */
-
-#define RCU_COND_RESCHED_LIM 256       /* ms vs. 100s of ms. */
-DECLARE_PER_CPU(int, rcu_cond_resched_count);
-void rcu_resched(void);
-
-/*
- * Is it time to report RCU quiescent states?
- *
- * Note unsynchronized access to rcu_cond_resched_count.  Yes, we might
- * increment some random CPU's count, and possibly also load the result from
- * yet another CPU's count.  We might even clobber some other CPU's attempt
- * to zero its counter.  This is all OK because the goal is not precision,
- * but rather reasonable amortization of rcu_note_context_switch() overhead
- * and extremely high probability of avoiding RCU CPU stall warnings.
- * Note that this function has to be preempted in just the wrong place,
- * many thousands of times in a row, for anything bad to happen.
- */
-static inline bool rcu_should_resched(void)
-{
-       return raw_cpu_inc_return(rcu_cond_resched_count) >=
-              RCU_COND_RESCHED_LIM;
-}
-
-/*
- * Report quiscent states to RCU if it is time to do so.
- */
-static inline void rcu_cond_resched(void)
-{
-       if (unlikely(rcu_should_resched()))
-               rcu_resched();
-}
-
 /*
  * Infrastructure to implement the synchronize_() primitives in
  * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
@@ -358,9 +322,19 @@ void wait_rcu_gp(call_rcu_func_t crf);
  * initialization.
  */
 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
+void init_rcu_head(struct rcu_head *head);
+void destroy_rcu_head(struct rcu_head *head);
 void init_rcu_head_on_stack(struct rcu_head *head);
 void destroy_rcu_head_on_stack(struct rcu_head *head);
 #else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
+static inline void init_rcu_head(struct rcu_head *head)
+{
+}
+
+static inline void destroy_rcu_head(struct rcu_head *head)
+{
+}
+
 static inline void init_rcu_head_on_stack(struct rcu_head *head)
 {
 }
index d5b13bc07a0b7823795c2b94c4a798b6dae01aa8..561e8615528d424ae02b3b3bc8efefa2e9d30d43 100644 (file)
 #ifdef __KERNEL__
 /*
  * the rw-semaphore definition
- * - if activity is 0 then there are no active readers or writers
- * - if activity is +ve then that is the number of active readers
- * - if activity is -1 then there is one active writer
+ * - if count is 0 then there are no active readers or writers
+ * - if count is +ve then that is the number of active readers
+ * - if count is -1 then there is one active writer
  * - if wait_list is not empty, then there are processes waiting for the semaphore
  */
 struct rw_semaphore {
-       __s32                   activity;
+       __s32                   count;
        raw_spinlock_t          wait_lock;
        struct list_head        wait_list;
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
index 8d79708146aa47d642d37e82ca3beea2eaa7c014..035d3c57fc8a7147207c1d2cf4e532a355c9d8ae 100644 (file)
 #include <linux/kernel.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
-
 #include <linux/atomic.h>
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
+#include <linux/osq_lock.h>
+#endif
 
-struct optimistic_spin_queue;
 struct rw_semaphore;
 
 #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
@@ -25,15 +26,15 @@ struct rw_semaphore;
 /* All arch specific implementations share the same struct */
 struct rw_semaphore {
        long count;
-       raw_spinlock_t wait_lock;
        struct list_head wait_list;
-#ifdef CONFIG_SMP
+       raw_spinlock_t wait_lock;
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
+       struct optimistic_spin_queue osq; /* spinner MCS lock */
        /*
         * Write owner. Used as a speculative check to see
         * if the owner is running on the cpu.
         */
        struct task_struct *owner;
-       struct optimistic_spin_queue *osq; /* spinner MCS lock */
 #endif
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lockdep_map      dep_map;
@@ -64,22 +65,19 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
 # define __RWSEM_DEP_MAP_INIT(lockname)
 #endif
 
-#if defined(CONFIG_SMP) && !defined(CONFIG_RWSEM_GENERIC_SPINLOCK)
-#define __RWSEM_INITIALIZER(name)                      \
-       { RWSEM_UNLOCKED_VALUE,                         \
-         __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock),     \
-         LIST_HEAD_INIT((name).wait_list),             \
-         NULL, /* owner */                             \
-         NULL /* mcs lock */                           \
-         __RWSEM_DEP_MAP_INIT(name) }
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
+#define __RWSEM_OPT_INIT(lockname) , .osq = OSQ_LOCK_UNLOCKED, .owner = NULL
 #else
-#define __RWSEM_INITIALIZER(name)                      \
-       { RWSEM_UNLOCKED_VALUE,                         \
-         __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock),     \
-         LIST_HEAD_INIT((name).wait_list)              \
-         __RWSEM_DEP_MAP_INIT(name) }
+#define __RWSEM_OPT_INIT(lockname)
 #endif
 
+#define __RWSEM_INITIALIZER(name)                              \
+       { .count = RWSEM_UNLOCKED_VALUE,                        \
+         .wait_list = LIST_HEAD_INIT((name).wait_list),        \
+         .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock) \
+         __RWSEM_OPT_INIT(name)                                \
+         __RWSEM_DEP_MAP_INIT(name) }
+
 #define DECLARE_RWSEM(name) \
        struct rw_semaphore name = __RWSEM_INITIALIZER(name)
 
index 306f4f0c987a006f43f520413f7de3a780f98a23..0376b054a0d0f426816737f4266038cef9531ace 100644 (file)
@@ -872,21 +872,21 @@ enum cpu_idle_type {
 #define SD_NUMA                        0x4000  /* cross-node balancing */
 
 #ifdef CONFIG_SCHED_SMT
-static inline const int cpu_smt_flags(void)
+static inline int cpu_smt_flags(void)
 {
        return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
 }
 #endif
 
 #ifdef CONFIG_SCHED_MC
-static inline const int cpu_core_flags(void)
+static inline int cpu_core_flags(void)
 {
        return SD_SHARE_PKG_RESOURCES;
 }
 #endif
 
 #ifdef CONFIG_NUMA
-static inline const int cpu_numa_flags(void)
+static inline int cpu_numa_flags(void)
 {
        return SD_NUMA;
 }
@@ -999,7 +999,7 @@ void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
 bool cpus_share_cache(int this_cpu, int that_cpu);
 
 typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
-typedef const int (*sched_domain_flags_f)(void);
+typedef int (*sched_domain_flags_f)(void);
 
 #define SDTL_OVERLAP   0x01
 
index 4723bbfa1c2643fe004d36b5b44367644ea524d9..a6e555cbe05c37a57ef70d2e31949c17a9d8b7dd 100644 (file)
@@ -63,8 +63,6 @@
 #include <linux/sfi.h>
 
 #ifdef CONFIG_SFI
-#include <acpi/acpi.h> /* FIXME: inclusion should be removed */
-
 extern int sfi_acpi_table_parse(char *signature, char *oem_id,
                                char *oem_table_id,
                                int (*handler)(struct acpi_table_header *));
@@ -78,7 +76,6 @@ static inline int __init acpi_sfi_table_parse(char *signature,
        return sfi_acpi_table_parse(signature, NULL, NULL, handler);
 }
 #else /* !CONFIG_SFI */
-
 static inline int sfi_acpi_table_parse(char *signature, char *oem_id,
                                char *oem_table_id,
                                int (*handler)(struct acpi_table_header *))
index 7277caf3743d269b8e547178c3ae7f58fbbd28e0..47f425464f847fd827719ac5da99cf2749824a14 100644 (file)
@@ -203,7 +203,6 @@ struct neigh_table {
        void                    (*proxy_redo)(struct sk_buff *skb);
        char                    *id;
        struct neigh_parms      parms;
-       /* HACK. gc_* should follow parms without a gap! */
        int                     gc_interval;
        int                     gc_thresh1;
        int                     gc_thresh2;
index 713b0b88bd5a4cb7e0820dc16bb60bfabe0cb945..c4d86198d3d6542088ed6db8d4daed2adf191806 100644 (file)
@@ -6,6 +6,7 @@
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter/nf_tables.h>
+#include <linux/u64_stats_sync.h>
 #include <net/netlink.h>
 
 #define NFT_JUMP_STACK_SIZE    16
@@ -528,8 +529,9 @@ enum nft_chain_type {
 };
 
 struct nft_stats {
-       u64 bytes;
-       u64 pkts;
+       u64                     bytes;
+       u64                     pkts;
+       struct u64_stats_sync   syncp;
 };
 
 #define NFT_HOOK_OPS_MAX               2
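Editor's note: adding a u64_stats_sync to nft_stats lets the two 64-bit counters be read consistently on 32-bit kernels. A hedged kernel-style sketch (not a drop-in) of how the syncp is typically paired with the counters above; the surrounding functions are hypothetical, the u64_stats_* helpers are the standard API from <linux/u64_stats_sync.h>:

static void stats_add(struct nft_stats *stats, unsigned int len)
{
        u64_stats_update_begin(&stats->syncp);
        stats->pkts++;
        stats->bytes += len;
        u64_stats_update_end(&stats->syncp);
}

static void stats_read(const struct nft_stats *stats, u64 *pkts, u64 *bytes)
{
        unsigned int start;

        do {
                start = u64_stats_fetch_begin(&stats->syncp);
                *pkts  = stats->pkts;
                *bytes = stats->bytes;
        } while (u64_stats_fetch_retry(&stats->syncp, start));
}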
index 079030c853d856d0604f5600154d57e79ab70515..e2070960bac009223c1c6caa1324d6323a478a7d 100644 (file)
@@ -16,7 +16,7 @@ struct netns_sysctl_lowpan {
 struct netns_ieee802154_lowpan {
        struct netns_sysctl_lowpan sysctl;
        struct netns_frags      frags;
-       u16                     max_dsize;
+       int                     max_dsize;
 };
 
 #endif
index 26a394cb91a8fde2bb5a3f8716d25e073e4b1862..eee608b12cc95f3267a00eed85467f94f761d298 100644 (file)
@@ -13,8 +13,8 @@ struct netns_nftables {
        struct nft_af_info      *inet;
        struct nft_af_info      *arp;
        struct nft_af_info      *bridge;
+       unsigned int            base_seq;
        u8                      gencursor;
-       u8                      genctr;
 };
 
 #endif
index 173cae485de1981e7c7f6b12cd545dfe1dfff3af..1563507457002532edd3ad0dfd7419c3a8cde42a 100644 (file)
@@ -1768,9 +1768,11 @@ __sk_dst_set(struct sock *sk, struct dst_entry *dst)
 static inline void
 sk_dst_set(struct sock *sk, struct dst_entry *dst)
 {
-       spin_lock(&sk->sk_dst_lock);
-       __sk_dst_set(sk, dst);
-       spin_unlock(&sk->sk_dst_lock);
+       struct dst_entry *old_dst;
+
+       sk_tx_queue_clear(sk);
+       old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst);
+       dst_release(old_dst);
 }
 
 static inline void
@@ -1782,9 +1784,7 @@ __sk_dst_reset(struct sock *sk)
 static inline void
 sk_dst_reset(struct sock *sk)
 {
-       spin_lock(&sk->sk_dst_lock);
-       __sk_dst_reset(sk);
-       spin_unlock(&sk->sk_dst_lock);
+       sk_dst_set(sk, NULL);
 }
 
 struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
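Editor's note: the sk_dst_set() change above replaces lock/update/unlock with a single atomic pointer exchange that publishes the new dst and releases whatever was cached before. A userspace C11 analogue of that publish-and-release pattern, with a hypothetical refcounted entry type standing in for dst_entry:

#include <stdatomic.h>
#include <stdlib.h>

struct entry {
        atomic_int refcnt;
};

static _Atomic(struct entry *) cache;

static void entry_release(struct entry *e)
{
        if (e && atomic_fetch_sub(&e->refcnt, 1) == 1)
                free(e);
}

static void cache_set(struct entry *new_entry)   /* may be NULL, as in sk_dst_reset() */
{
        struct entry *old = atomic_exchange(&cache, new_entry);

        entry_release(old);                      /* drop the displaced entry's reference */
}

int main(void)
{
        struct entry *a = calloc(1, sizeof(*a));
        struct entry *b = calloc(1, sizeof(*b));

        atomic_init(&a->refcnt, 1);
        atomic_init(&b->refcnt, 1);
        cache_set(a);
        cache_set(b);      /* frees a */
        cache_set(NULL);   /* frees b */
        return 0;
}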
index 40b5ca8a1b1f3028e5e03c5b3372f98e39437422..25084a052a1eff964d19a9683d5d6470590e4c7e 100644 (file)
  *  - add FATTR_CTIME
  *  - add ctime and ctimensec to fuse_setattr_in
  *  - add FUSE_RENAME2 request
+ *  - add FUSE_NO_OPEN_SUPPORT flag
  */
 
 #ifndef _LINUX_FUSE_H
@@ -229,6 +230,7 @@ struct fuse_file_lock {
  * FUSE_READDIRPLUS_AUTO: adaptive readdirplus
  * FUSE_ASYNC_DIO: asynchronous direct I/O submission
  * FUSE_WRITEBACK_CACHE: use writeback cache for buffered writes
+ * FUSE_NO_OPEN_SUPPORT: kernel supports zero-message opens
  */
 #define FUSE_ASYNC_READ                (1 << 0)
 #define FUSE_POSIX_LOCKS       (1 << 1)
@@ -247,6 +249,7 @@ struct fuse_file_lock {
 #define FUSE_READDIRPLUS_AUTO  (1 << 14)
 #define FUSE_ASYNC_DIO         (1 << 15)
 #define FUSE_WRITEBACK_CACHE   (1 << 16)
+#define FUSE_NO_OPEN_SUPPORT   (1 << 17)
 
 /**
  * CUSE INIT request/reply flags
index 35536d9c096420f3718b93e583c10d5fdfc07cec..76768ee812b27b7a48e13710ec23326af9b828af 100644 (file)
@@ -220,9 +220,16 @@ config INLINE_WRITE_UNLOCK_IRQRESTORE
 
 endif
 
+config ARCH_SUPPORTS_ATOMIC_RMW
+       bool
+
 config MUTEX_SPIN_ON_OWNER
        def_bool y
-       depends on SMP && !DEBUG_MUTEXES
+       depends on SMP && !DEBUG_MUTEXES && ARCH_SUPPORTS_ATOMIC_RMW
+
+config RWSEM_SPIN_ON_OWNER
+       def_bool y
+       depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW
 
 config ARCH_USE_QUEUE_RWLOCK
        bool
index a33d9a2bcbd73840aeed6c89af9c3af574c09eeb..6b17ac1b0c2a33c479a352fde633f47731807950 100644 (file)
@@ -2320,7 +2320,7 @@ static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
        next_parent = rcu_dereference(next_ctx->parent_ctx);
 
        /* If neither context have a parent context; they cannot be clones. */
-       if (!parent && !next_parent)
+       if (!parent || !next_parent)
                goto unlock;
 
        if (next_parent == ctx || next_ctx == parent || next_parent == parent) {
@@ -7458,7 +7458,19 @@ __perf_event_exit_task(struct perf_event *child_event,
                         struct perf_event_context *child_ctx,
                         struct task_struct *child)
 {
-       perf_remove_from_context(child_event, true);
+       /*
+        * Do not destroy the 'original' grouping; because of the context
+        * switch optimization the original events could've ended up in a
+        * random child task.
+        *
+        * If we were to destroy the original group, all group related
+        * operations would cease to function properly after this random
+        * child dies.
+        *
+        * Do destroy all inherited groups, we don't care about those
+        * and being thorough is better.
+        */
+       perf_remove_from_context(child_event, !!child_event->parent);
 
        /*
         * It can happen that the parent exits first, and has events
@@ -7474,7 +7486,7 @@ __perf_event_exit_task(struct perf_event *child_event,
 static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
 {
        struct perf_event *child_event, *next;
-       struct perf_event_context *child_ctx;
+       struct perf_event_context *child_ctx, *parent_ctx;
        unsigned long flags;
 
        if (likely(!child->perf_event_ctxp[ctxn])) {
@@ -7499,6 +7511,15 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
        raw_spin_lock(&child_ctx->lock);
        task_ctx_sched_out(child_ctx);
        child->perf_event_ctxp[ctxn] = NULL;
+
+       /*
+        * In order to avoid freeing: child_ctx->parent_ctx->task
+        * under perf_event_context::lock, grab another reference.
+        */
+       parent_ctx = child_ctx->parent_ctx;
+       if (parent_ctx)
+               get_ctx(parent_ctx);
+
        /*
         * If this context is a clone; unclone it so it can't get
         * swapped to another process while we're removing all
@@ -7508,6 +7529,13 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
        update_context_time(child_ctx);
        raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
 
+       /*
+        * Now that we no longer hold perf_event_context::lock, drop
+        * our extra child_ctx->parent_ctx reference.
+        */
+       if (parent_ctx)
+               put_ctx(parent_ctx);
+
        /*
         * Report the task dead after unscheduling the events so that we
         * won't get any samples after PERF_RECORD_EXIT. We can however still
index 3214289df5a7a8f6917718a9a00f418794efeab1..734e9a7d280bd22a046566cc40b1edf159f250bf 100644 (file)
@@ -2037,19 +2037,23 @@ static int __init populate_kprobe_blacklist(unsigned long *start,
 {
        unsigned long *iter;
        struct kprobe_blacklist_entry *ent;
-       unsigned long offset = 0, size = 0;
+       unsigned long entry, offset = 0, size = 0;
 
        for (iter = start; iter < end; iter++) {
-               if (!kallsyms_lookup_size_offset(*iter, &size, &offset)) {
-                       pr_err("Failed to find blacklist %p\n", (void *)*iter);
+               entry = arch_deref_entry_point((void *)*iter);
+
+               if (!kernel_text_address(entry) ||
+                   !kallsyms_lookup_size_offset(entry, &size, &offset)) {
+                       pr_err("Failed to find blacklist at %p\n",
+                               (void *)entry);
                        continue;
                }
 
                ent = kmalloc(sizeof(*ent), GFP_KERNEL);
                if (!ent)
                        return -ENOMEM;
-               ent->start_addr = *iter;
-               ent->end_addr = *iter + size;
+               ent->start_addr = entry;
+               ent->end_addr = entry + size;
                INIT_LIST_HEAD(&ent->list);
                list_add_tail(&ent->list, &kprobe_blacklist);
        }
index 838dc9e0066975943f835960b9801cdb6561afc5..be9ee1559fca5243bd2c6689117a8dde6ee50fb0 100644 (file)
  * called from interrupt context and we have preemption disabled while
  * spinning.
  */
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_queue, osq_node);
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_node, osq_node);
+
+/*
+ * We use the value 0 to represent "no CPU", thus the encoded value
+ * will be the CPU number incremented by 1.
+ */
+static inline int encode_cpu(int cpu_nr)
+{
+       return cpu_nr + 1;
+}
+
+static inline struct optimistic_spin_node *decode_cpu(int encoded_cpu_val)
+{
+       int cpu_nr = encoded_cpu_val - 1;
+
+       return per_cpu_ptr(&osq_node, cpu_nr);
+}
 
 /*
  * Get a stable @node->next pointer, either for unlock() or unqueue() purposes.
  * Can return NULL in case we were the last queued and we updated @lock instead.
  */
-static inline struct optimistic_spin_queue *
-osq_wait_next(struct optimistic_spin_queue **lock,
-             struct optimistic_spin_queue *node,
-             struct optimistic_spin_queue *prev)
+static inline struct optimistic_spin_node *
+osq_wait_next(struct optimistic_spin_queue *lock,
+             struct optimistic_spin_node *node,
+             struct optimistic_spin_node *prev)
 {
-       struct optimistic_spin_queue *next = NULL;
+       struct optimistic_spin_node *next = NULL;
+       int curr = encode_cpu(smp_processor_id());
+       int old;
+
+       /*
+        * If there is a prev node in queue, then the 'old' value will be
+        * the prev node's CPU #, else it's set to OSQ_UNLOCKED_VAL since if
+        * we're currently last in queue, then the queue will then become empty.
+        */
+       old = prev ? prev->cpu : OSQ_UNLOCKED_VAL;
 
        for (;;) {
-               if (*lock == node && cmpxchg(lock, node, prev) == node) {
+               if (atomic_read(&lock->tail) == curr &&
+                   atomic_cmpxchg(&lock->tail, curr, old) == curr) {
                        /*
                         * We were the last queued, we moved @lock back. @prev
                         * will now observe @lock and will complete its
@@ -59,18 +85,23 @@ osq_wait_next(struct optimistic_spin_queue **lock,
        return next;
 }
 
-bool osq_lock(struct optimistic_spin_queue **lock)
+bool osq_lock(struct optimistic_spin_queue *lock)
 {
-       struct optimistic_spin_queue *node = this_cpu_ptr(&osq_node);
-       struct optimistic_spin_queue *prev, *next;
+       struct optimistic_spin_node *node = this_cpu_ptr(&osq_node);
+       struct optimistic_spin_node *prev, *next;
+       int curr = encode_cpu(smp_processor_id());
+       int old;
 
        node->locked = 0;
        node->next = NULL;
+       node->cpu = curr;
 
-       node->prev = prev = xchg(lock, node);
-       if (likely(prev == NULL))
+       old = atomic_xchg(&lock->tail, curr);
+       if (old == OSQ_UNLOCKED_VAL)
                return true;
 
+       prev = decode_cpu(old);
+       node->prev = prev;
        ACCESS_ONCE(prev->next) = node;
 
        /*
@@ -149,20 +180,21 @@ unqueue:
        return false;
 }
 
-void osq_unlock(struct optimistic_spin_queue **lock)
+void osq_unlock(struct optimistic_spin_queue *lock)
 {
-       struct optimistic_spin_queue *node = this_cpu_ptr(&osq_node);
-       struct optimistic_spin_queue *next;
+       struct optimistic_spin_node *node, *next;
+       int curr = encode_cpu(smp_processor_id());
 
        /*
         * Fast path for the uncontended case.
         */
-       if (likely(cmpxchg(lock, node, NULL) == node))
+       if (likely(atomic_cmpxchg(&lock->tail, curr, OSQ_UNLOCKED_VAL) == curr))
                return;
 
        /*
         * Second most likely case.
         */
+       node = this_cpu_ptr(&osq_node);
        next = xchg(&node->next, NULL);
        if (next) {
                ACCESS_ONCE(next->locked) = 1;
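Editor's note: the conversion above stores an encoded CPU number in an atomic_t tail instead of a node pointer, reserving 0 for "queue empty" so CPU 0 never collides with the unlocked value. A tiny userspace sketch of that encoding:

#include <assert.h>
#include <stdio.h>

#define OSQ_UNLOCKED_VAL 0

static int encode_cpu(int cpu_nr)  { return cpu_nr + 1; }
static int decode_cpu(int encoded) { return encoded - 1; }

int main(void)
{
        assert(encode_cpu(0) != OSQ_UNLOCKED_VAL);   /* CPU 0 never reads as "empty" */
        assert(decode_cpu(encode_cpu(5)) == 5);
        printf("cpu 0 encodes to %d\n", encode_cpu(0));
        return 0;
}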
index a2dbac4aca6b2f5aa447aa9edc12ec130456bb45..74356dc0ce298c8b9092edc4c48e7a6a62af3830 100644 (file)
@@ -118,12 +118,13 @@ void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
  * mutex_lock()/rwsem_down_{read,write}() etc.
  */
 
-struct optimistic_spin_queue {
-       struct optimistic_spin_queue *next, *prev;
+struct optimistic_spin_node {
+       struct optimistic_spin_node *next, *prev;
        int locked; /* 1 if lock acquired */
+       int cpu; /* encoded CPU # value */
 };
 
-extern bool osq_lock(struct optimistic_spin_queue **lock);
-extern void osq_unlock(struct optimistic_spin_queue **lock);
+extern bool osq_lock(struct optimistic_spin_queue *lock);
+extern void osq_unlock(struct optimistic_spin_queue *lock);
 
 #endif /* __LINUX_MCS_SPINLOCK_H */
index bc73d33c6760e174fd1bb2c8319c0faf5abc221f..acca2c1a3c5e550a42cae2256e0b88b02f352faa 100644 (file)
@@ -60,7 +60,7 @@ __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
        INIT_LIST_HEAD(&lock->wait_list);
        mutex_clear_owner(lock);
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
-       lock->osq = NULL;
+       osq_lock_init(&lock->osq);
 #endif
 
        debug_mutex_init(lock, name, key);
index 9be8a9144978685b4c7eae4762d559f9896675a4..2c93571162cb7573f17a1eb7a0a424e07b061a25 100644 (file)
@@ -26,7 +26,7 @@ int rwsem_is_locked(struct rw_semaphore *sem)
        unsigned long flags;
 
        if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) {
-               ret = (sem->activity != 0);
+               ret = (sem->count != 0);
                raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
        }
        return ret;
@@ -46,7 +46,7 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
        debug_check_no_locks_freed((void *)sem, sizeof(*sem));
        lockdep_init_map(&sem->dep_map, name, key, 0);
 #endif
-       sem->activity = 0;
+       sem->count = 0;
        raw_spin_lock_init(&sem->wait_lock);
        INIT_LIST_HEAD(&sem->wait_list);
 }
@@ -95,7 +95,7 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
                waiter = list_entry(next, struct rwsem_waiter, list);
        } while (waiter->type != RWSEM_WAITING_FOR_WRITE);
 
-       sem->activity += woken;
+       sem->count += woken;
 
  out:
        return sem;
@@ -126,9 +126,9 @@ void __sched __down_read(struct rw_semaphore *sem)
 
        raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
-       if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
+       if (sem->count >= 0 && list_empty(&sem->wait_list)) {
                /* granted */
-               sem->activity++;
+               sem->count++;
                raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
                goto out;
        }
@@ -170,9 +170,9 @@ int __down_read_trylock(struct rw_semaphore *sem)
 
        raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
-       if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
+       if (sem->count >= 0 && list_empty(&sem->wait_list)) {
                /* granted */
-               sem->activity++;
+               sem->count++;
                ret = 1;
        }
 
@@ -206,7 +206,7 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
                 * itself into sleep and waiting for system woke it or someone
                 * else in the head of the wait list up.
                 */
-               if (sem->activity == 0)
+               if (sem->count == 0)
                        break;
                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
                raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
@@ -214,7 +214,7 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
                raw_spin_lock_irqsave(&sem->wait_lock, flags);
        }
        /* got the lock */
-       sem->activity = -1;
+       sem->count = -1;
        list_del(&waiter.list);
 
        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
@@ -235,9 +235,9 @@ int __down_write_trylock(struct rw_semaphore *sem)
 
        raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
-       if (sem->activity == 0) {
+       if (sem->count == 0) {
                /* got the lock */
-               sem->activity = -1;
+               sem->count = -1;
                ret = 1;
        }
 
@@ -255,7 +255,7 @@ void __up_read(struct rw_semaphore *sem)
 
        raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
-       if (--sem->activity == 0 && !list_empty(&sem->wait_list))
+       if (--sem->count == 0 && !list_empty(&sem->wait_list))
                sem = __rwsem_wake_one_writer(sem);
 
        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
@@ -270,7 +270,7 @@ void __up_write(struct rw_semaphore *sem)
 
        raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
-       sem->activity = 0;
+       sem->count = 0;
        if (!list_empty(&sem->wait_list))
                sem = __rwsem_do_wake(sem, 1);
 
@@ -287,7 +287,7 @@ void __downgrade_write(struct rw_semaphore *sem)
 
        raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
-       sem->activity = 1;
+       sem->count = 1;
        if (!list_empty(&sem->wait_list))
                sem = __rwsem_do_wake(sem, 0);
 
index dacc32142fccaec5222ba82c01801ed7768a819f..a2391ac135c8d4ffb83aabee5870d89287ebf64f 100644 (file)
@@ -82,9 +82,9 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
        sem->count = RWSEM_UNLOCKED_VALUE;
        raw_spin_lock_init(&sem->wait_lock);
        INIT_LIST_HEAD(&sem->wait_list);
-#ifdef CONFIG_SMP
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
        sem->owner = NULL;
-       sem->osq = NULL;
+       osq_lock_init(&sem->osq);
 #endif
 }
 
@@ -262,7 +262,7 @@ static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
        return false;
 }
 
-#ifdef CONFIG_SMP
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
 /*
  * Try to acquire write lock before the writer has been put on wait queue.
  */
@@ -285,10 +285,10 @@ static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
 static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
 {
        struct task_struct *owner;
-       bool on_cpu = true;
+       bool on_cpu = false;
 
        if (need_resched())
-               return 0;
+               return false;
 
        rcu_read_lock();
        owner = ACCESS_ONCE(sem->owner);
@@ -297,9 +297,9 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
        rcu_read_unlock();
 
        /*
-        * If sem->owner is not set, the rwsem owner may have
-        * just acquired it and not set the owner yet or the rwsem
-        * has been released.
+        * If sem->owner is not set and we have only just entered the
+        * slowpath, then there is a possibility that reader(s) may hold the lock.
+        * To be safe, avoid spinning in these situations.
         */
        return on_cpu;
 }
index 42f806de49d421092a7bd077c8efb4df9546cb94..e2d3bc7f03b41e1c01a7c8fc548ac162cdfa151e 100644 (file)
@@ -12,7 +12,7 @@
 
 #include <linux/atomic.h>
 
-#if defined(CONFIG_SMP) && defined(CONFIG_RWSEM_XCHGADD_ALGORITHM)
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
 static inline void rwsem_set_owner(struct rw_semaphore *sem)
 {
        sem->owner = current;
index 8e90f330f1398277d1b41b0e94c99e71759e2665..a18efed75fa7ea75083f523132ad0c5ab338d1e6 100644 (file)
@@ -615,7 +615,6 @@ static struct attribute_group attr_group = {
        .attrs = g,
 };
 
-#ifdef CONFIG_PM_RUNTIME
 struct workqueue_struct *pm_wq;
 EXPORT_SYMBOL_GPL(pm_wq);
 
@@ -625,9 +624,6 @@ static int __init pm_start_workqueue(void)
 
        return pm_wq ? 0 : -ENOMEM;
 }
-#else
-static inline int pm_start_workqueue(void) { return 0; }
-#endif
 
 static int __init pm_init(void)
 {
index 0ca8d83e2369e253706ff787c2e4243928fbcca0..4ee194eb524b3663dd39dfa7c22eb9565321853b 100644 (file)
@@ -186,6 +186,7 @@ void thaw_processes(void)
 
        printk("Restarting tasks ... ");
 
+       __usermodehelper_set_disable_depth(UMH_FREEZING);
        thaw_workqueues();
 
        read_lock(&tasklist_lock);
index 4dd8822f732a2a23835fca1f8f9a991ca756c3dd..ed35a4790afe13502a721fc66658a85660896299 100644 (file)
@@ -306,7 +306,7 @@ int suspend_devices_and_enter(suspend_state_t state)
                error = suspend_ops->begin(state);
                if (error)
                        goto Close;
-       } else if (state == PM_SUSPEND_FREEZE && freeze_ops->begin) {
+       } else if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->begin) {
                error = freeze_ops->begin();
                if (error)
                        goto Close;
@@ -335,7 +335,7 @@ int suspend_devices_and_enter(suspend_state_t state)
  Close:
        if (need_suspend_ops(state) && suspend_ops->end)
                suspend_ops->end();
-       else if (state == PM_SUSPEND_FREEZE && freeze_ops->end)
+       else if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->end)
                freeze_ops->end();
 
        return error;
index f1ba77363fbb937e41fcdd50564bb514a41cbbdc..625d0b0cd75a0a2227a3519d781b66ff02ab5523 100644 (file)
@@ -206,6 +206,70 @@ void rcu_bh_qs(int cpu)
        rdp->passed_quiesce = 1;
 }
 
+static DEFINE_PER_CPU(int, rcu_sched_qs_mask);
+
+static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
+       .dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
+       .dynticks = ATOMIC_INIT(1),
+#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
+       .dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE,
+       .dynticks_idle = ATOMIC_INIT(1),
+#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
+};
+
+/*
+ * Let the RCU core know that this CPU has gone through the scheduler,
+ * which is a quiescent state.  This is called when the need for a
+ * quiescent state is urgent, so we burn an atomic operation and full
+ * memory barriers to let the RCU core know about it, regardless of what
+ * this CPU might (or might not) do in the near future.
+ *
+ * We inform the RCU core by emulating a zero-duration dyntick-idle
+ * period, which we in turn do by incrementing the ->dynticks counter
+ * by two.
+ */
+static void rcu_momentary_dyntick_idle(void)
+{
+       unsigned long flags;
+       struct rcu_data *rdp;
+       struct rcu_dynticks *rdtp;
+       int resched_mask;
+       struct rcu_state *rsp;
+
+       local_irq_save(flags);
+
+       /*
+        * Yes, we can lose flag-setting operations.  This is OK, because
+        * the flag will be set again after some delay.
+        */
+       resched_mask = raw_cpu_read(rcu_sched_qs_mask);
+       raw_cpu_write(rcu_sched_qs_mask, 0);
+
+       /* Find the flavor that needs a quiescent state. */
+       for_each_rcu_flavor(rsp) {
+               rdp = raw_cpu_ptr(rsp->rda);
+               if (!(resched_mask & rsp->flavor_mask))
+                       continue;
+               smp_mb(); /* rcu_sched_qs_mask before cond_resched_completed. */
+               if (ACCESS_ONCE(rdp->mynode->completed) !=
+                   ACCESS_ONCE(rdp->cond_resched_completed))
+                       continue;
+
+               /*
+                * Pretend to be momentarily idle for the quiescent state.
+                * This allows the grace-period kthread to record the
+                * quiescent state, with no need for this CPU to do anything
+                * further.
+                */
+               rdtp = this_cpu_ptr(&rcu_dynticks);
+               smp_mb__before_atomic(); /* Earlier stuff before QS. */
+               atomic_add(2, &rdtp->dynticks);  /* QS. */
+               smp_mb__after_atomic(); /* Later stuff after QS. */
+               break;
+       }
+       local_irq_restore(flags);
+}
+
 /*
  * Note a context switch.  This is a quiescent state for RCU-sched,
  * and requires special handling for preemptible RCU.
@@ -216,19 +280,12 @@ void rcu_note_context_switch(int cpu)
        trace_rcu_utilization(TPS("Start context switch"));
        rcu_sched_qs(cpu);
        rcu_preempt_note_context_switch(cpu);
+       if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
+               rcu_momentary_dyntick_idle();
        trace_rcu_utilization(TPS("End context switch"));
 }
 EXPORT_SYMBOL_GPL(rcu_note_context_switch);
 
-static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
-       .dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
-       .dynticks = ATOMIC_INIT(1),
-#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
-       .dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE,
-       .dynticks_idle = ATOMIC_INIT(1),
-#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
-};
-
 static long blimit = 10;       /* Maximum callbacks per rcu_do_batch. */
 static long qhimark = 10000;   /* If this many pending, ignore blimit. */
 static long qlowmark = 100;    /* Once only this many pending, use blimit. */
@@ -243,6 +300,13 @@ static ulong jiffies_till_next_fqs = ULONG_MAX;
 module_param(jiffies_till_first_fqs, ulong, 0644);
 module_param(jiffies_till_next_fqs, ulong, 0644);
 
+/*
+ * How long the grace period must be before we start recruiting
+ * quiescent-state help from rcu_note_context_switch().
+ */
+static ulong jiffies_till_sched_qs = HZ / 20;
+module_param(jiffies_till_sched_qs, ulong, 0644);
+
 static bool rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
                                  struct rcu_data *rdp);
 static void force_qs_rnp(struct rcu_state *rsp,
@@ -853,6 +917,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
                                    bool *isidle, unsigned long *maxj)
 {
        unsigned int curr;
+       int *rcrmp;
        unsigned int snap;
 
        curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
@@ -893,27 +958,43 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
        }
 
        /*
-        * There is a possibility that a CPU in adaptive-ticks state
-        * might run in the kernel with the scheduling-clock tick disabled
-        * for an extended time period.  Invoke rcu_kick_nohz_cpu() to
-        * force the CPU to restart the scheduling-clock tick in this
-        * CPU is in this state.
-        */
-       rcu_kick_nohz_cpu(rdp->cpu);
-
-       /*
-        * Alternatively, the CPU might be running in the kernel
-        * for an extended period of time without a quiescent state.
-        * Attempt to force the CPU through the scheduler to gain the
-        * needed quiescent state, but only if the grace period has gone
-        * on for an uncommonly long time.  If there are many stuck CPUs,
-        * we will beat on the first one until it gets unstuck, then move
-        * to the next.  Only do this for the primary flavor of RCU.
+        * A CPU running for an extended time within the kernel can
+        * delay RCU grace periods.  When the CPU is in NO_HZ_FULL mode,
+        * even context-switching back and forth between a pair of
+        * in-kernel CPU-bound tasks cannot advance grace periods.
+        * So if the grace period is old enough, make the CPU pay attention.
+        * Note that the unsynchronized assignments to the per-CPU
+        * rcu_sched_qs_mask variable are safe.  Yes, setting of
+        * bits can be lost, but they will be set again on the next
+        * force-quiescent-state pass.  So lost bit sets do not result
+        * in incorrect behavior, merely in a grace period lasting
+        * a few jiffies longer than it might otherwise.  Because
+        * there are at most four threads involved, and because the
+        * updates are only once every few jiffies, the probability of
+        * lossage (and thus of slight grace-period extension) is
+        * quite low.
+        *
+        * Note that if the jiffies_till_sched_qs boot/sysfs parameter
+        * is set too high, we override with half of the RCU CPU stall
+        * warning delay.
         */
-       if (rdp->rsp == rcu_state_p &&
+       rcrmp = &per_cpu(rcu_sched_qs_mask, rdp->cpu);
+       if (ULONG_CMP_GE(jiffies,
+                        rdp->rsp->gp_start + jiffies_till_sched_qs) ||
            ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
-               rdp->rsp->jiffies_resched += 5;
-               resched_cpu(rdp->cpu);
+               if (!(ACCESS_ONCE(*rcrmp) & rdp->rsp->flavor_mask)) {
+                       ACCESS_ONCE(rdp->cond_resched_completed) =
+                               ACCESS_ONCE(rdp->mynode->completed);
+                       smp_mb(); /* ->cond_resched_completed before *rcrmp. */
+                       ACCESS_ONCE(*rcrmp) =
+                               ACCESS_ONCE(*rcrmp) + rdp->rsp->flavor_mask;
+                       resched_cpu(rdp->cpu);  /* Force CPU into scheduler. */
+                       rdp->rsp->jiffies_resched += 5; /* Enable beating. */
+               } else if (ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
+                       /* Time to beat on that CPU again! */
+                       resched_cpu(rdp->cpu);  /* Force CPU into scheduler. */
+                       rdp->rsp->jiffies_resched += 5; /* Re-enable beating. */
+               }
        }
 
        return 0;
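
The mechanism added above has two halves: rcu_implicit_dynticks_qs() sets a bit in the per-CPU rcu_sched_qs_mask and kicks the CPU with resched_cpu(), and rcu_note_context_switch() then calls rcu_momentary_dyntick_idle(), which adds two to ->dynticks so the grace-period machinery sees the counter move. A minimal sketch of why a +2 bump is enough, assuming the usual convention that an odd counter value means "not idle" and that the grace-period side compares a snapshot against the current value (the names below are made up, not the kernel's):

/* Sketch: how bumping a dynticks-style counter by two reports a
 * quiescent state.  Assumption (matching the comments above): the
 * counter is odd while the CPU is non-idle, and the grace-period side
 * snapshots it and later checks whether it moved or became even. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint dynticks = 1;        /* odd: CPU is not idle */

static unsigned int gp_snapshot(void)
{
        return atomic_load(&dynticks);
}

static int gp_saw_quiescent_state(unsigned int snap)
{
        unsigned int curr = atomic_load(&dynticks);

        return (curr & 1) == 0 || curr != snap;
}

static void momentary_dyntick_idle(void)
{
        /* +2 keeps the "not idle" parity but differs from any earlier
         * snapshot, so the CPU appears to have passed through a
         * quiescent state without actually going idle. */
        atomic_fetch_add(&dynticks, 2);
}

int main(void)
{
        unsigned int snap = gp_snapshot();

        printf("QS seen before bump: %d\n", gp_saw_quiescent_state(snap));
        momentary_dyntick_idle();
        printf("QS seen after bump:  %d\n", gp_saw_quiescent_state(snap));
        return 0;
}
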
@@ -3491,6 +3572,7 @@ static void __init rcu_init_one(struct rcu_state *rsp,
                               "rcu_node_fqs_1",
                               "rcu_node_fqs_2",
                               "rcu_node_fqs_3" };  /* Match MAX_RCU_LVLS */
+       static u8 fl_mask = 0x1;
        int cpustride = 1;
        int i;
        int j;
@@ -3509,6 +3591,8 @@ static void __init rcu_init_one(struct rcu_state *rsp,
        for (i = 1; i < rcu_num_lvls; i++)
                rsp->level[i] = rsp->level[i - 1] + rsp->levelcnt[i - 1];
        rcu_init_levelspread(rsp);
+       rsp->flavor_mask = fl_mask;
+       fl_mask <<= 1;
 
        /* Initialize the elements themselves, starting from the leaves. */
 
index bf2c1e669691725848b7a769e1aa00828935e40e..0f69a79c5b7dcd0891910318a4b1206aca8129e7 100644 (file)
@@ -307,6 +307,9 @@ struct rcu_data {
        /* 4) reasons this CPU needed to be kicked by force_quiescent_state */
        unsigned long dynticks_fqs;     /* Kicked due to dynticks idle. */
        unsigned long offline_fqs;      /* Kicked due to being offline. */
+       unsigned long cond_resched_completed;
+                                       /* Grace period that needs help */
+                                       /*  from cond_resched(). */
 
        /* 5) __rcu_pending() statistics. */
        unsigned long n_rcu_pending;    /* rcu_pending() calls since boot. */
@@ -392,6 +395,7 @@ struct rcu_state {
        struct rcu_node *level[RCU_NUM_LVLS];   /* Hierarchy levels. */
        u32 levelcnt[MAX_RCU_LVLS + 1];         /* # nodes in each level. */
        u8 levelspread[RCU_NUM_LVLS];           /* kids/node in each level. */
+       u8 flavor_mask;                         /* bit in flavor mask. */
        struct rcu_data __percpu *rda;          /* pointer to percpu rcu_data. */
        void (*call)(struct rcu_head *head,     /* call_rcu() flavor. */
                     void (*func)(struct rcu_head *head));
@@ -563,7 +567,7 @@ static bool rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp);
 static void do_nocb_deferred_wakeup(struct rcu_data *rdp);
 static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
 static void rcu_spawn_nocb_kthreads(struct rcu_state *rsp);
-static void rcu_kick_nohz_cpu(int cpu);
+static void __maybe_unused rcu_kick_nohz_cpu(int cpu);
 static bool init_nocb_callback_list(struct rcu_data *rdp);
 static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq);
 static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq);
index cbc2c45265e2a7d94c4682ccf5abf3540978657b..02ac0fb186b82fb1005f65c2e86ac56da952dc88 100644 (file)
@@ -2404,7 +2404,7 @@ static bool init_nocb_callback_list(struct rcu_data *rdp)
  * if an adaptive-ticks CPU is failing to respond to the current grace
  * period and has not been idle from an RCU perspective, kick it.
  */
-static void rcu_kick_nohz_cpu(int cpu)
+static void __maybe_unused rcu_kick_nohz_cpu(int cpu)
 {
 #ifdef CONFIG_NO_HZ_FULL
        if (tick_nohz_full_cpu(cpu))
index a2aeb4df0f603e9ebf2b3a19d764291a3965d3e0..bc78835705302a8fb3602826189e83cfb98894b7 100644 (file)
@@ -200,12 +200,12 @@ void wait_rcu_gp(call_rcu_func_t crf)
 EXPORT_SYMBOL_GPL(wait_rcu_gp);
 
 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
-static inline void debug_init_rcu_head(struct rcu_head *head)
+void init_rcu_head(struct rcu_head *head)
 {
        debug_object_init(head, &rcuhead_debug_descr);
 }
 
-static inline void debug_rcu_head_free(struct rcu_head *head)
+void destroy_rcu_head(struct rcu_head *head)
 {
        debug_object_free(head, &rcuhead_debug_descr);
 }
@@ -350,21 +350,3 @@ static int __init check_cpu_stall_init(void)
 early_initcall(check_cpu_stall_init);
 
 #endif /* #ifdef CONFIG_RCU_STALL_COMMON */
-
-/*
- * Hooks for cond_resched() and friends to avoid RCU CPU stall warnings.
- */
-
-DEFINE_PER_CPU(int, rcu_cond_resched_count);
-
-/*
- * Report a set of RCU quiescent states, for use by cond_resched()
- * and friends.  Out of line due to being called infrequently.
- */
-void rcu_resched(void)
-{
-       preempt_disable();
-       __this_cpu_write(rcu_cond_resched_count, 0);
-       rcu_note_context_switch(smp_processor_id());
-       preempt_enable();
-}
index 3bdf01b494fe29c267a0abe73828b02a799a737d..bc1638b334494eee0138a13453e2d380a717428b 100644 (file)
@@ -4147,7 +4147,6 @@ static void __cond_resched(void)
 
 int __sched _cond_resched(void)
 {
-       rcu_cond_resched();
        if (should_resched()) {
                __cond_resched();
                return 1;
@@ -4166,18 +4165,15 @@ EXPORT_SYMBOL(_cond_resched);
  */
 int __cond_resched_lock(spinlock_t *lock)
 {
-       bool need_rcu_resched = rcu_should_resched();
        int resched = should_resched();
        int ret = 0;
 
        lockdep_assert_held(lock);
 
-       if (spin_needbreak(lock) || resched || need_rcu_resched) {
+       if (spin_needbreak(lock) || resched) {
                spin_unlock(lock);
                if (resched)
                        __cond_resched();
-               else if (unlikely(need_rcu_resched))
-                       rcu_resched();
                else
                        cpu_relax();
                ret = 1;
@@ -4191,7 +4187,6 @@ int __sched __cond_resched_softirq(void)
 {
        BUG_ON(!in_softirq());
 
-       rcu_cond_resched();  /* BH disabled OK, just recording QSes. */
        if (should_resched()) {
                local_bh_enable();
                __cond_resched();
index 695f9773bb6018fb75d4424d51e91cdbd7c1eb71..627b3c34b821de4471acd21e0591fc5fb8968a62 100644 (file)
@@ -608,7 +608,7 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 
                avg_atom = p->se.sum_exec_runtime;
                if (nr_switches)
-                       do_div(avg_atom, nr_switches);
+                       avg_atom = div64_ul(avg_atom, nr_switches);
                else
                        avg_atom = -1LL;
 
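The hunk above replaces a do_div()-style call with div64_ul(). A do_div()-style helper takes a 32-bit divisor, so handing it the 64-bit nr_switches value silently truncates the divisor (and, if its low 32 bits happen to be zero, divides by zero); a 64-by-64 helper avoids both. A standalone illustration of the truncation pitfall (do_div32() below is a made-up stand-in, not the kernel macro):

/* Sketch of why a 64-by-32 divide helper must not be fed a 64-bit
 * divisor: the high 32 bits are silently dropped. */
#include <stdint.h>
#include <stdio.h>

static uint64_t do_div32(uint64_t n, uint32_t base)     /* 32-bit divisor */
{
        return n / base;
}

int main(void)
{
        uint64_t runtime  = 1000000000000ULL;    /* e.g. summed runtime */
        uint64_t switches = 0x100000001ULL;      /* does not fit in 32 bits */

        printf("truncated divisor:  %llu\n",
               (unsigned long long)do_div32(runtime, (uint32_t)switches));
        printf("full 64-bit divide: %llu\n",
               (unsigned long long)(runtime / switches));
        return 0;
}
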
index 88c9c65a430dadd15db4e1c08f7819f6e8bd2f80..fe75444ae7ec3847484e8ba5891320829ae45dc9 100644 (file)
@@ -585,9 +585,14 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
                                struct itimerspec *new_setting,
                                struct itimerspec *old_setting)
 {
+       ktime_t exp;
+
        if (!rtcdev)
                return -ENOTSUPP;
 
+       if (flags & ~TIMER_ABSTIME)
+               return -EINVAL;
+
        if (old_setting)
                alarm_timer_get(timr, old_setting);
 
@@ -597,8 +602,16 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
 
        /* start the timer */
        timr->it.alarm.interval = timespec_to_ktime(new_setting->it_interval);
-       alarm_start(&timr->it.alarm.alarmtimer,
-                       timespec_to_ktime(new_setting->it_value));
+       exp = timespec_to_ktime(new_setting->it_value);
+       /* Convert (if necessary) to absolute time */
+       if (flags != TIMER_ABSTIME) {
+               ktime_t now;
+
+               now = alarm_bases[timr->it.alarm.alarmtimer.type].gettime();
+               exp = ktime_add(now, exp);
+       }
+
+       alarm_start(&timr->it.alarm.alarmtimer, exp);
        return 0;
 }
 
@@ -730,6 +743,9 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags,
        if (!alarmtimer_get_rtcdev())
                return -ENOTSUPP;
 
+       if (flags & ~TIMER_ABSTIME)
+               return -EINVAL;
+
        if (!capable(CAP_WAKE_ALARM))
                return -EPERM;
 
index 5b372e3ed675c8ed10d45441060a6d7c5fe2a360..ac9d1dad630b3b806b8802e2c722639dad0eca9f 100644 (file)
@@ -265,12 +265,12 @@ static void update_ftrace_function(void)
                func = ftrace_ops_list_func;
        }
 
+       update_function_graph_func();
+
        /* If there's no change, then do nothing more here */
        if (ftrace_trace_function == func)
                return;
 
-       update_function_graph_func();
-
        /*
         * If we are using the list function, it doesn't care
         * about the function_trace_ops.
index 7c56c3d06943060d13ed611da638a09ccfa70ef4..ff7027199a9a32ea9281bca4e2049ebbb549dd38 100644 (file)
@@ -616,10 +616,6 @@ int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
        struct ring_buffer_per_cpu *cpu_buffer;
        struct rb_irq_work *work;
 
-       if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
-           (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
-               return POLLIN | POLLRDNORM;
-
        if (cpu == RING_BUFFER_ALL_CPUS)
                work = &buffer->irq_work;
        else {
index f243444a37729ae5c17ee0654b2b606acfedaae7..291397e66669d4d2f9f4090ce040b94f8e7187fe 100644 (file)
@@ -466,6 +466,12 @@ int __trace_puts(unsigned long ip, const char *str, int size)
        struct print_entry *entry;
        unsigned long irq_flags;
        int alloc;
+       int pc;
+
+       if (!(trace_flags & TRACE_ITER_PRINTK))
+               return 0;
+
+       pc = preempt_count();
 
        if (unlikely(tracing_selftest_running || tracing_disabled))
                return 0;
@@ -475,7 +481,7 @@ int __trace_puts(unsigned long ip, const char *str, int size)
        local_save_flags(irq_flags);
        buffer = global_trace.trace_buffer.buffer;
        event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc, 
-                                         irq_flags, preempt_count());
+                                         irq_flags, pc);
        if (!event)
                return 0;
 
@@ -492,6 +498,7 @@ int __trace_puts(unsigned long ip, const char *str, int size)
                entry->buf[size] = '\0';
 
        __buffer_unlock_commit(buffer, event);
+       ftrace_trace_stack(buffer, irq_flags, 4, pc);
 
        return size;
 }
@@ -509,6 +516,12 @@ int __trace_bputs(unsigned long ip, const char *str)
        struct bputs_entry *entry;
        unsigned long irq_flags;
        int size = sizeof(struct bputs_entry);
+       int pc;
+
+       if (!(trace_flags & TRACE_ITER_PRINTK))
+               return 0;
+
+       pc = preempt_count();
 
        if (unlikely(tracing_selftest_running || tracing_disabled))
                return 0;
@@ -516,7 +529,7 @@ int __trace_bputs(unsigned long ip, const char *str)
        local_save_flags(irq_flags);
        buffer = global_trace.trace_buffer.buffer;
        event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
-                                         irq_flags, preempt_count());
+                                         irq_flags, pc);
        if (!event)
                return 0;
 
@@ -525,6 +538,7 @@ int __trace_bputs(unsigned long ip, const char *str)
        entry->str                      = str;
 
        __buffer_unlock_commit(buffer, event);
+       ftrace_trace_stack(buffer, irq_flags, 4, pc);
 
        return 1;
 }
@@ -809,7 +823,7 @@ static struct {
        { trace_clock_local,    "local",        1 },
        { trace_clock_global,   "global",       1 },
        { trace_clock_counter,  "counter",      0 },
-       { trace_clock_jiffies,  "uptime",       1 },
+       { trace_clock_jiffies,  "uptime",       0 },
        { trace_clock,          "perf",         1 },
        ARCH_TRACE_CLOCKS
 };
index 26dc348332b798eeb43a77cf2d89357512d9e8c0..57b67b1f24d1a141f88163c385e62be25cd275cf 100644 (file)
@@ -59,13 +59,14 @@ u64 notrace trace_clock(void)
 
 /*
  * trace_jiffy_clock(): Simply use jiffies as a clock counter.
+ * Note that this use of jiffies_64 is not completely safe on
+ * 32-bit systems. But the window is tiny, and if we are affected,
+ * the effect is merely an obviously bogus timestamp on a trace
+ * event - i.e. not life threatening.
  */
 u64 notrace trace_clock_jiffies(void)
 {
-       u64 jiffy = jiffies - INITIAL_JIFFIES;
-
-       /* Return nsecs */
-       return (u64)jiffies_to_usecs(jiffy) * 1000ULL;
+       return jiffies_64_to_clock_t(jiffies_64 - INITIAL_JIFFIES);
 }
 
 /*
index f99e0b3bca8cba6372301afa4a56d35c69d01b6e..2de53628689f5fb8096204e1948c29d3e03a1d09 100644 (file)
@@ -470,6 +470,7 @@ static void remove_event_file_dir(struct ftrace_event_file *file)
 
        list_del(&file->list);
        remove_subsystem(file->system);
+       free_event_filter(file->filter);
        kmem_cache_free(file_cachep, file);
 }
 
index c101230658ebc9af1f7daf35252e293e0ce8d3f0..b6513a9f2892042a8a49a5bc872c0ca33990e2d8 100644 (file)
@@ -191,7 +191,7 @@ int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp)
 
        i %= num_online_cpus();
 
-       if (!cpumask_of_node(numa_node)) {
+       if (numa_node == -1 || !cpumask_of_node(numa_node)) {
                /* Use all online CPUs for a non-NUMA-aware system */
                cpumask_copy(mask, cpu_online_mask);
        } else {
index 2024bbd573d2a9ca8a08842cdf0b99d2062cbee1..9221c02ed9e2b8ab495e0b39482e2bcfa5ba9957 100644 (file)
@@ -2604,6 +2604,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                } else {
                        if (cow)
                                huge_ptep_set_wrprotect(src, addr, src_pte);
+                       entry = huge_ptep_get(src_pte);
                        ptepage = pte_page(entry);
                        get_page(ptepage);
                        page_dup_rmap(ptepage);
index c6399e32893178b835457388371e8e4f85512361..7211a73ba14d16155c1514c078b1e3c9d25e0b22 100644 (file)
@@ -435,7 +435,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
        if (av == NULL) /* Not actually mapped anymore */
                return;
 
-       pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+       pgoff = page_to_pgoff(page);
        read_lock(&tasklist_lock);
        for_each_process (tsk) {
                struct anon_vma_chain *vmac;
@@ -469,7 +469,7 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
        mutex_lock(&mapping->i_mmap_mutex);
        read_lock(&tasklist_lock);
        for_each_process(tsk) {
-               pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+               pgoff_t pgoff = page_to_pgoff(page);
                struct task_struct *t = task_early_kill(tsk, force_early);
 
                if (!t)
index d67fd9fcf1f2e11d8b77c513113475df125ad99d..7e8d8205b6108fcf5b0a97aad3da36c9455b230f 100644 (file)
@@ -2882,7 +2882,8 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
         * if page by the offset is not ready to be mapped (cold cache or
         * something).
         */
-       if (vma->vm_ops->map_pages && fault_around_pages() > 1) {
+       if (vma->vm_ops->map_pages && !(flags & FAULT_FLAG_NONLINEAR) &&
+           fault_around_pages() > 1) {
                pte = pte_offset_map_lock(mm, pmd, address, &ptl);
                do_fault_around(vma, address, pte, pgoff, flags);
                if (!pte_same(*pte, orig_pte))
index 9e0beaa918454abbcd63e94ee6cefb5f108f751f..be6dbf995c0cea7128fa58124057d8891cfa7933 100644 (file)
@@ -988,9 +988,10 @@ out:
         * it.  Otherwise, putback_lru_page() will drop the reference grabbed
         * during isolation.
         */
-       if (rc != MIGRATEPAGE_SUCCESS && put_new_page)
+       if (rc != MIGRATEPAGE_SUCCESS && put_new_page) {
+               ClearPageSwapBacked(newpage);
                put_new_page(newpage, private);
-       else
+       else
                putback_lru_page(newpage);
 
        if (result) {
index b7e94ebbd09e88c3b356e36fe89ed72b89e14474..22a4a7699cdbeb51e86c22ebbd4b1118693042f9 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -517,11 +517,7 @@ void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
 static inline unsigned long
 __vma_address(struct page *page, struct vm_area_struct *vma)
 {
-       pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
-
-       if (unlikely(is_vm_hugetlb_page(vma)))
-               pgoff = page->index << huge_page_order(page_hstate(page));
-
+       pgoff_t pgoff = page_to_pgoff(page);
        return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
 }
 
@@ -1639,7 +1635,7 @@ static struct anon_vma *rmap_walk_anon_lock(struct page *page,
 static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
 {
        struct anon_vma *anon_vma;
-       pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+       pgoff_t pgoff = page_to_pgoff(page);
        struct anon_vma_chain *avc;
        int ret = SWAP_AGAIN;
 
@@ -1680,7 +1676,7 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
 static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
 {
        struct address_space *mapping = page->mapping;
-       pgoff_t pgoff = page->index << compound_order(page);
+       pgoff_t pgoff = page_to_pgoff(page);
        struct vm_area_struct *vma;
        int ret = SWAP_AGAIN;
 
index 1140f49b6ded6f7a72d89d2e89f9fce0df1a940d..af68b15a8fc1f99ede5cf82a38aecfc6b3b6eda6 100644 (file)
@@ -85,7 +85,7 @@ static struct vfsmount *shm_mnt;
  * a time): we would prefer not to enlarge the shmem inode just for that.
  */
 struct shmem_falloc {
-       int     mode;           /* FALLOC_FL mode currently operating */
+       wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
        pgoff_t start;          /* start of range currently being fallocated */
        pgoff_t next;           /* the next page offset to be fallocated */
        pgoff_t nr_falloced;    /* how many new pages have been fallocated */
@@ -468,23 +468,20 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
                return;
 
        index = start;
-       for ( ; ; ) {
+       while (index < end) {
                cond_resched();
 
                pvec.nr = find_get_entries(mapping, index,
                                min(end - index, (pgoff_t)PAGEVEC_SIZE),
                                pvec.pages, indices);
                if (!pvec.nr) {
-                       if (index == start || unfalloc)
+                       /* If all gone or hole-punch or unfalloc, we're done */
+                       if (index == start || end != -1)
                                break;
+                       /* But if truncating, restart to make sure all gone */
                        index = start;
                        continue;
                }
-               if ((index == start || unfalloc) && indices[0] >= end) {
-                       pagevec_remove_exceptionals(&pvec);
-                       pagevec_release(&pvec);
-                       break;
-               }
                mem_cgroup_uncharge_start();
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
@@ -496,8 +493,12 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
                        if (radix_tree_exceptional_entry(page)) {
                                if (unfalloc)
                                        continue;
-                               nr_swaps_freed += !shmem_free_swap(mapping,
-                                                               index, page);
+                               if (shmem_free_swap(mapping, index, page)) {
+                                       /* Swap was replaced by page: retry */
+                                       index--;
+                                       break;
+                               }
+                               nr_swaps_freed++;
                                continue;
                        }
 
@@ -506,6 +507,11 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
                                if (page->mapping == mapping) {
                                        VM_BUG_ON_PAGE(PageWriteback(page), page);
                                        truncate_inode_page(mapping, page);
+                               } else {
+                                       /* Page was replaced by swap: retry */
+                                       unlock_page(page);
+                                       index--;
+                                       break;
                                }
                        }
                        unlock_page(page);
@@ -760,7 +766,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
                        spin_lock(&inode->i_lock);
                        shmem_falloc = inode->i_private;
                        if (shmem_falloc &&
-                           !shmem_falloc->mode &&
+                           !shmem_falloc->waitq &&
                            index >= shmem_falloc->start &&
                            index < shmem_falloc->next)
                                shmem_falloc->nr_unswapped++;
@@ -1248,38 +1254,58 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
         * Trinity finds that probing a hole which tmpfs is punching can
         * prevent the hole-punch from ever completing: which in turn
         * locks writers out with its hold on i_mutex.  So refrain from
-        * faulting pages into the hole while it's being punched, and
-        * wait on i_mutex to be released if vmf->flags permits.
+        * faulting pages into the hole while it's being punched.  Although
+        * shmem_undo_range() does remove the additions, it may be unable to
+        * keep up, as each new page needs its own unmap_mapping_range() call,
+        * and the i_mmap tree grows ever slower to scan if new vmas are added.
+        *
+        * It does not matter if we sometimes reach this check just before the
+        * hole-punch begins, so that one fault then races with the punch:
+        * we just need to make racing faults a rare case.
+        *
+        * The implementation below would be much simpler if we just used a
+        * standard mutex or completion: but we cannot take i_mutex in fault,
+        * and bloating every shmem inode for this unlikely case would be sad.
         */
        if (unlikely(inode->i_private)) {
                struct shmem_falloc *shmem_falloc;
 
                spin_lock(&inode->i_lock);
                shmem_falloc = inode->i_private;
-               if (!shmem_falloc ||
-                   shmem_falloc->mode != FALLOC_FL_PUNCH_HOLE ||
-                   vmf->pgoff < shmem_falloc->start ||
-                   vmf->pgoff >= shmem_falloc->next)
-                       shmem_falloc = NULL;
-               spin_unlock(&inode->i_lock);
-               /*
-                * i_lock has protected us from taking shmem_falloc seriously
-                * once return from shmem_fallocate() went back up that stack.
-                * i_lock does not serialize with i_mutex at all, but it does
-                * not matter if sometimes we wait unnecessarily, or sometimes
-                * miss out on waiting: we just need to make those cases rare.
-                */
-               if (shmem_falloc) {
+               if (shmem_falloc &&
+                   shmem_falloc->waitq &&
+                   vmf->pgoff >= shmem_falloc->start &&
+                   vmf->pgoff < shmem_falloc->next) {
+                       wait_queue_head_t *shmem_falloc_waitq;
+                       DEFINE_WAIT(shmem_fault_wait);
+
+                       ret = VM_FAULT_NOPAGE;
                        if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
                           !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
+                               /* It's polite to up mmap_sem if we can */
                                up_read(&vma->vm_mm->mmap_sem);
-                               mutex_lock(&inode->i_mutex);
-                               mutex_unlock(&inode->i_mutex);
-                               return VM_FAULT_RETRY;
+                               ret = VM_FAULT_RETRY;
                        }
-                       /* cond_resched? Leave that to GUP or return to user */
-                       return VM_FAULT_NOPAGE;
+
+                       shmem_falloc_waitq = shmem_falloc->waitq;
+                       prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
+                                       TASK_UNINTERRUPTIBLE);
+                       spin_unlock(&inode->i_lock);
+                       schedule();
+
+                       /*
+                        * shmem_falloc_waitq points into the shmem_fallocate()
+                        * stack of the hole-punching task: shmem_falloc_waitq
+                        * is usually invalid by the time we reach here, but
+                        * finish_wait() does not dereference it in that case;
+                        * though i_lock needed lest racing with wake_up_all().
+                        */
+                       spin_lock(&inode->i_lock);
+                       finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
+                       spin_unlock(&inode->i_lock);
+                       return ret;
                }
+               spin_unlock(&inode->i_lock);
        }
 
        error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
@@ -1774,13 +1800,13 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
 
        mutex_lock(&inode->i_mutex);
 
-       shmem_falloc.mode = mode & ~FALLOC_FL_KEEP_SIZE;
-
        if (mode & FALLOC_FL_PUNCH_HOLE) {
                struct address_space *mapping = file->f_mapping;
                loff_t unmap_start = round_up(offset, PAGE_SIZE);
                loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
+               DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
 
+               shmem_falloc.waitq = &shmem_falloc_waitq;
                shmem_falloc.start = unmap_start >> PAGE_SHIFT;
                shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
                spin_lock(&inode->i_lock);
@@ -1792,8 +1818,13 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
                                            1 + unmap_end - unmap_start, 0);
                shmem_truncate_range(inode, offset, offset + len - 1);
                /* No need to unmap again: hole-punching leaves COWed pages */
+
+               spin_lock(&inode->i_lock);
+               inode->i_private = NULL;
+               wake_up_all(&shmem_falloc_waitq);
+               spin_unlock(&inode->i_lock);
                error = 0;
-               goto undone;
+               goto out;
        }
 
        /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
@@ -1809,6 +1840,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
                goto out;
        }
 
+       shmem_falloc.waitq = NULL;
        shmem_falloc.start = start;
        shmem_falloc.next  = start;
        shmem_falloc.nr_falloced = 0;
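
The shmem_fault()/shmem_fallocate() hunks above replace the old i_mutex trick with a wait queue that lives on the hole-puncher's stack: the puncher publishes it through inode->i_private under i_lock, performs the punch, then clears i_private and calls wake_up_all() before returning. A rough userspace analogue of that ordering, assuming pthreads (it models only the publish/wait/unpublish-then-wake handshake, not shmem itself); build with -pthread:

/* Userspace analogue of the on-stack wait-object handshake: waiters only
 * dereference the published pointer while it is non-NULL under the lock,
 * and the owner clears it before broadcasting, so nobody touches the
 * stack object once the owner may be gone.  Illustration only. */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t i_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t *punch_waitq;     /* plays the role of inode->i_private */

static void *fault_thread(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&i_lock);
        while (punch_waitq)                     /* hole punch in progress? */
                pthread_cond_wait(punch_waitq, &i_lock);
        pthread_mutex_unlock(&i_lock);
        puts("fault proceeds: punch finished");
        return NULL;
}

int main(void)
{
        pthread_cond_t waitq;                   /* lives on the "puncher" stack */
        pthread_t t;

        pthread_cond_init(&waitq, NULL);

        pthread_mutex_lock(&i_lock);
        punch_waitq = &waitq;                   /* publish, like i_private */
        pthread_mutex_unlock(&i_lock);

        pthread_create(&t, NULL, fault_thread, NULL);
        sleep(1);                               /* pretend to punch the hole */

        pthread_mutex_lock(&i_lock);
        punch_waitq = NULL;                     /* unpublish ... */
        pthread_cond_broadcast(&waitq);         /* ... then wake_up_all() analogue */
        pthread_mutex_unlock(&i_lock);

        pthread_join(t, NULL);
        pthread_cond_destroy(&waitq);
        return 0;
}
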
index 735e01a0db6f8c3ffa28150e5faedf362dc5b874..d31c4bacc6a203b0bc555bd76c2a97e90e78fa6c 100644 (file)
@@ -55,7 +55,7 @@ static int kmem_cache_sanity_check(const char *name, size_t size)
                        continue;
                }
 
-#if !defined(CONFIG_SLUB) || !defined(CONFIG_SLUB_DEBUG_ON)
+#if !defined(CONFIG_SLUB)
                if (!strcmp(s->name, name)) {
                        pr_err("%s (%s): Cache name already exists.\n",
                               __func__, name);
index 6a78c814bebfb151b1e490424c731edc172ad430..eda2473071648cc47935dd9e21e9f57fd402a4dd 100644 (file)
@@ -355,14 +355,16 @@ void truncate_inode_pages_range(struct address_space *mapping,
        for ( ; ; ) {
                cond_resched();
                if (!pagevec_lookup_entries(&pvec, mapping, index,
-                       min(end - index, (pgoff_t)PAGEVEC_SIZE),
-                       indices)) {
+                       min(end - index, (pgoff_t)PAGEVEC_SIZE), indices)) {
+                       /* If all gone from start onwards, we're done */
                        if (index == start)
                                break;
+                       /* Otherwise restart to make sure all gone */
                        index = start;
                        continue;
                }
                if (index == start && indices[0] >= end) {
+                       /* All gone out of hole to be punched, we're done */
                        pagevec_remove_exceptionals(&pvec);
                        pagevec_release(&pvec);
                        break;
@@ -373,8 +375,11 @@ void truncate_inode_pages_range(struct address_space *mapping,
 
                        /* We rely upon deletion not changing page->index */
                        index = indices[i];
-                       if (index >= end)
+                       if (index >= end) {
+                               /* Restart punch to make sure all gone */
+                               index = start - 1;
                                break;
+                       }
 
                        if (radix_tree_exceptional_entry(page)) {
                                clear_exceptional_entry(mapping, index, page);
index ad2ac3c003988741c066c2bb467c2abc4a523c5a..dd11f612e03e42684a6732dd84aee0995415c710 100644 (file)
@@ -627,8 +627,6 @@ static void vlan_dev_uninit(struct net_device *dev)
        struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
        int i;
 
-       free_percpu(vlan->vlan_pcpu_stats);
-       vlan->vlan_pcpu_stats = NULL;
        for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) {
                while ((pm = vlan->egress_priority_map[i]) != NULL) {
                        vlan->egress_priority_map[i] = pm->next;
@@ -785,6 +783,15 @@ static const struct net_device_ops vlan_netdev_ops = {
        .ndo_get_lock_subclass  = vlan_dev_get_lock_subclass,
 };
 
+static void vlan_dev_free(struct net_device *dev)
+{
+       struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+
+       free_percpu(vlan->vlan_pcpu_stats);
+       vlan->vlan_pcpu_stats = NULL;
+       free_netdev(dev);
+}
+
 void vlan_setup(struct net_device *dev)
 {
        ether_setup(dev);
@@ -794,7 +801,7 @@ void vlan_setup(struct net_device *dev)
        dev->tx_queue_len       = 0;
 
        dev->netdev_ops         = &vlan_netdev_ops;
-       dev->destructor         = free_netdev;
+       dev->destructor         = vlan_dev_free;
        dev->ethtool_ops        = &vlan_ethtool_ops;
 
        memset(dev->broadcast, 0, ETH_ALEN);
index 01a1082e02b3157b3abc84e6b23844ed3a26f2f2..bfcf6be1d665c89e3f88c4f9e34c452a03ced57d 100644 (file)
@@ -1489,8 +1489,6 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev,
                goto drop;
 
        /* Queue packet (standard) */
-       skb->sk = sock;
-
        if (sock_queue_rcv_skb(sock, skb) < 0)
                goto drop;
 
@@ -1644,7 +1642,6 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
        if (!skb)
                goto out;
 
-       skb->sk = sk;
        skb_reserve(skb, ddp_dl->header_length);
        skb_reserve(skb, dev->hard_header_len);
        skb->dev = dev;
index 6f0d9ec3795059fdc5319574b65b24c08aaf2790..a957c8140721def2878b292c2e61e2dd37c36e5c 100644 (file)
@@ -800,11 +800,6 @@ static int batadv_check_claim_group(struct batadv_priv *bat_priv,
        bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
        bla_dst_own = &bat_priv->bla.claim_dest;
 
-       /* check if it is a claim packet in general */
-       if (memcmp(bla_dst->magic, bla_dst_own->magic,
-                  sizeof(bla_dst->magic)) != 0)
-               return 0;
-
        /* if announcement packet, use the source,
         * otherwise assume it is in the hw_src
         */
@@ -866,12 +861,13 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
                                    struct batadv_hard_iface *primary_if,
                                    struct sk_buff *skb)
 {
-       struct batadv_bla_claim_dst *bla_dst;
+       struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
        uint8_t *hw_src, *hw_dst;
-       struct vlan_ethhdr *vhdr;
+       struct vlan_hdr *vhdr, vhdr_buf;
        struct ethhdr *ethhdr;
        struct arphdr *arphdr;
        unsigned short vid;
+       int vlan_depth = 0;
        __be16 proto;
        int headlen;
        int ret;
@@ -882,9 +878,24 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
        proto = ethhdr->h_proto;
        headlen = ETH_HLEN;
        if (vid & BATADV_VLAN_HAS_TAG) {
-               vhdr = vlan_eth_hdr(skb);
-               proto = vhdr->h_vlan_encapsulated_proto;
-               headlen += VLAN_HLEN;
+               /* Traverse the VLAN/Ethertypes.
+                *
+                * At this point it is known that the first protocol is a VLAN
+                * header, so start checking at the encapsulated protocol.
+                *
+                * The depth of the VLAN headers is recorded to drop BLA claim
+                * frames encapsulated into multiple VLAN headers (QinQ).
+                */
+               do {
+                       vhdr = skb_header_pointer(skb, headlen, VLAN_HLEN,
+                                                 &vhdr_buf);
+                       if (!vhdr)
+                               return 0;
+
+                       proto = vhdr->h_vlan_encapsulated_proto;
+                       headlen += VLAN_HLEN;
+                       vlan_depth++;
+               } while (proto == htons(ETH_P_8021Q));
        }
 
        if (proto != htons(ETH_P_ARP))
@@ -914,6 +925,19 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
        hw_src = (uint8_t *)arphdr + sizeof(struct arphdr);
        hw_dst = hw_src + ETH_ALEN + 4;
        bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
+       bla_dst_own = &bat_priv->bla.claim_dest;
+
+       /* check if it is a claim frame in general */
+       if (memcmp(bla_dst->magic, bla_dst_own->magic,
+                  sizeof(bla_dst->magic)) != 0)
+               return 0;
+
+       /* check if there is a claim frame encapsulated deeper in (QinQ) and
+        * drop that, as this is not supported by BLA but should also not be
+        * sent via the mesh.
+        */
+       if (vlan_depth > 1)
+               return 1;
 
        /* check if it is a claim frame. */
        ret = batadv_check_claim_group(bat_priv, primary_if, hw_src, hw_dst,
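
The batadv_bla_process_claim() hunk above walks an arbitrary stack of 802.1Q headers instead of assuming a single one, recording the depth so that claim frames nested in QinQ can be dropped. A standalone sketch of that walk over a raw Ethernet frame buffer (the offsets and the 0x8100 ethertype are standard 802.1Q; everything batman-adv-specific is omitted):

/* Sketch: count stacked 802.1Q (VLAN) headers in an Ethernet frame.
 * After dst(6) + src(6) MAC addresses, each 0x8100 ethertype is followed
 * by a 2-byte TCI and then the next ethertype, i.e. 4 bytes per tag. */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int count_vlan_tags(const uint8_t *frame, size_t len)
{
        size_t off = 12;                /* offset of the first ethertype */
        int depth = 0;
        uint16_t proto;

        while (off + 2 <= len) {
                memcpy(&proto, frame + off, sizeof(proto));
                if (ntohs(proto) != 0x8100)     /* ETH_P_8021Q */
                        break;
                off += 4;               /* skip TPID + TCI */
                depth++;
        }
        return depth;
}

int main(void)
{
        /* dst, src, two stacked VLAN tags (QinQ), then IPv4 (0x0800) */
        uint8_t frame[] = {
                0, 1, 2, 3, 4, 5,   6, 7, 8, 9, 10, 11,
                0x81, 0x00, 0x00, 0x0a,         /* outer tag, VID 10 */
                0x81, 0x00, 0x00, 0x14,         /* inner tag, VID 20 */
                0x08, 0x00,
        };

        printf("vlan depth: %d\n", count_vlan_tags(frame, sizeof(frame)));
        return 0;
}
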
index e7ee65dc20bf4f25a1a8d0134c66b0bfaef25bd3..cbd677f48c00541fc8ff9aed5b0943d3855b1810 100644 (file)
@@ -448,10 +448,15 @@ out:
  *  possibly free it
  * @softif_vlan: the vlan object to release
  */
-void batadv_softif_vlan_free_ref(struct batadv_softif_vlan *softif_vlan)
+void batadv_softif_vlan_free_ref(struct batadv_softif_vlan *vlan)
 {
-       if (atomic_dec_and_test(&softif_vlan->refcount))
-               kfree_rcu(softif_vlan, rcu);
+       if (atomic_dec_and_test(&vlan->refcount)) {
+               spin_lock_bh(&vlan->bat_priv->softif_vlan_list_lock);
+               hlist_del_rcu(&vlan->list);
+               spin_unlock_bh(&vlan->bat_priv->softif_vlan_list_lock);
+
+               kfree_rcu(vlan, rcu);
+       }
 }
 
 /**
@@ -505,6 +510,7 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
        if (!vlan)
                return -ENOMEM;
 
+       vlan->bat_priv = bat_priv;
        vlan->vid = vid;
        atomic_set(&vlan->refcount, 1);
 
@@ -516,6 +522,10 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
                return err;
        }
 
+       spin_lock_bh(&bat_priv->softif_vlan_list_lock);
+       hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
+       spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
+
        /* add a new TT local entry. This one will be marked with the NOPURGE
         * flag
         */
@@ -523,10 +533,6 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
                            bat_priv->soft_iface->dev_addr, vid,
                            BATADV_NULL_IFINDEX, BATADV_NO_MARK);
 
-       spin_lock_bh(&bat_priv->softif_vlan_list_lock);
-       hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
-       spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
-
        return 0;
 }
 
@@ -538,18 +544,13 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
 static void batadv_softif_destroy_vlan(struct batadv_priv *bat_priv,
                                       struct batadv_softif_vlan *vlan)
 {
-       spin_lock_bh(&bat_priv->softif_vlan_list_lock);
-       hlist_del_rcu(&vlan->list);
-       spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
-
-       batadv_sysfs_del_vlan(bat_priv, vlan);
-
        /* explicitly remove the associated TT local entry because it is marked
         * with the NOPURGE flag
         */
        batadv_tt_local_remove(bat_priv, bat_priv->soft_iface->dev_addr,
                               vlan->vid, "vlan interface destroyed", false);
 
+       batadv_sysfs_del_vlan(bat_priv, vlan);
        batadv_softif_vlan_free_ref(vlan);
 }
 
@@ -567,6 +568,8 @@ static int batadv_interface_add_vid(struct net_device *dev, __be16 proto,
                                    unsigned short vid)
 {
        struct batadv_priv *bat_priv = netdev_priv(dev);
+       struct batadv_softif_vlan *vlan;
+       int ret;
 
        /* only 802.1Q vlans are supported.
         * batman-adv does not know how to handle other types
@@ -576,7 +579,36 @@ static int batadv_interface_add_vid(struct net_device *dev, __be16 proto,
 
        vid |= BATADV_VLAN_HAS_TAG;
 
-       return batadv_softif_create_vlan(bat_priv, vid);
+       /* if a new vlan is getting created and it already exists, it means that
+        * it was not deleted yet. batadv_softif_vlan_get() increases the
+        * refcount in order to revive the object.
+        *
+        * if it does not exist then create it.
+        */
+       vlan = batadv_softif_vlan_get(bat_priv, vid);
+       if (!vlan)
+               return batadv_softif_create_vlan(bat_priv, vid);
+
+       /* recreate the sysfs object if it was already destroyed (and it should
+        * be, since we received a kill_vid() for this vlan)
+        */
+       if (!vlan->kobj) {
+               ret = batadv_sysfs_add_vlan(bat_priv->soft_iface, vlan);
+               if (ret) {
+                       batadv_softif_vlan_free_ref(vlan);
+                       return ret;
+               }
+       }
+
+       /* add a new TT local entry. This one will be marked with the NOPURGE
+        * flag. This must be added again, even if the vlan object already
+        * exists, because the entry was deleted by kill_vid()
+        */
+       batadv_tt_local_add(bat_priv->soft_iface,
+                           bat_priv->soft_iface->dev_addr, vid,
+                           BATADV_NULL_IFINDEX, BATADV_NO_MARK);
+
+       return 0;
 }
 
 /**
index d636bde72c9ace9cfbcead01353c955f17923155..5f59e7f899a0179a544764207468c6b4b336a237 100644 (file)
@@ -511,6 +511,7 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
        struct batadv_priv *bat_priv = netdev_priv(soft_iface);
        struct batadv_tt_local_entry *tt_local;
        struct batadv_tt_global_entry *tt_global = NULL;
+       struct batadv_softif_vlan *vlan;
        struct net_device *in_dev = NULL;
        struct hlist_head *head;
        struct batadv_tt_orig_list_entry *orig_entry;
@@ -572,6 +573,9 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
        if (!tt_local)
                goto out;
 
+       /* increase the refcounter of the related vlan */
+       vlan = batadv_softif_vlan_get(bat_priv, vid);
+
        batadv_dbg(BATADV_DBG_TT, bat_priv,
                   "Creating new local tt entry: %pM (vid: %d, ttvn: %d)\n",
                   addr, BATADV_PRINT_VID(vid),
@@ -604,6 +608,7 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
        if (unlikely(hash_added != 0)) {
                /* remove the reference for the hash */
                batadv_tt_local_entry_free_ref(tt_local);
+               batadv_softif_vlan_free_ref(vlan);
                goto out;
        }
 
@@ -1009,6 +1014,7 @@ uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
 {
        struct batadv_tt_local_entry *tt_local_entry;
        uint16_t flags, curr_flags = BATADV_NO_FLAGS;
+       struct batadv_softif_vlan *vlan;
 
        tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
        if (!tt_local_entry)
@@ -1039,6 +1045,11 @@ uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
        hlist_del_rcu(&tt_local_entry->common.hash_entry);
        batadv_tt_local_entry_free_ref(tt_local_entry);
 
+       /* decrease the reference held for this vlan */
+       vlan = batadv_softif_vlan_get(bat_priv, vid);
+       batadv_softif_vlan_free_ref(vlan);
+       batadv_softif_vlan_free_ref(vlan);
+
 out:
        if (tt_local_entry)
                batadv_tt_local_entry_free_ref(tt_local_entry);
@@ -1111,6 +1122,7 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
        spinlock_t *list_lock; /* protects write access to the hash lists */
        struct batadv_tt_common_entry *tt_common_entry;
        struct batadv_tt_local_entry *tt_local;
+       struct batadv_softif_vlan *vlan;
        struct hlist_node *node_tmp;
        struct hlist_head *head;
        uint32_t i;
@@ -1131,6 +1143,13 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
                        tt_local = container_of(tt_common_entry,
                                                struct batadv_tt_local_entry,
                                                common);
+
+                       /* decrease the reference held for this vlan */
+                       vlan = batadv_softif_vlan_get(bat_priv,
+                                                     tt_common_entry->vid);
+                       batadv_softif_vlan_free_ref(vlan);
+                       batadv_softif_vlan_free_ref(vlan);
+
                        batadv_tt_local_entry_free_ref(tt_local);
                }
                spin_unlock_bh(list_lock);
@@ -3139,6 +3158,7 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
        struct batadv_hashtable *hash = bat_priv->tt.local_hash;
        struct batadv_tt_common_entry *tt_common;
        struct batadv_tt_local_entry *tt_local;
+       struct batadv_softif_vlan *vlan;
        struct hlist_node *node_tmp;
        struct hlist_head *head;
        spinlock_t *list_lock; /* protects write access to the hash lists */
@@ -3167,6 +3187,12 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
                        tt_local = container_of(tt_common,
                                                struct batadv_tt_local_entry,
                                                common);
+
+                       /* decrease the reference held for this vlan */
+                       vlan = batadv_softif_vlan_get(bat_priv, tt_common->vid);
+                       batadv_softif_vlan_free_ref(vlan);
+                       batadv_softif_vlan_free_ref(vlan);
+
                        batadv_tt_local_entry_free_ref(tt_local);
                }
                spin_unlock_bh(list_lock);
index 34891a56773f09ebcccab01fe3191b1a56651aed..8854c05622a9bae2b8f30b0cf91686e8c629a849 100644 (file)
@@ -687,6 +687,7 @@ struct batadv_priv_nc {
 
 /**
  * struct batadv_softif_vlan - per VLAN attributes set
+ * @bat_priv: pointer to the mesh object
  * @vid: VLAN identifier
  * @kobj: kobject for sysfs vlan subdirectory
  * @ap_isolation: AP isolation state
@@ -696,6 +697,7 @@ struct batadv_priv_nc {
  * @rcu: struct used for freeing in a RCU-safe manner
  */
 struct batadv_softif_vlan {
+       struct batadv_priv *bat_priv;
        unsigned short vid;
        struct kobject *kobj;
        atomic_t ap_isolation;          /* boolean */
index ca01d18618549e2ef6caf5783bc9f2c7153a215b..a7a27bc2c0b1d8a7200e0a627c69b329e08cf838 100644 (file)
@@ -289,10 +289,20 @@ static void hci_conn_timeout(struct work_struct *work)
 {
        struct hci_conn *conn = container_of(work, struct hci_conn,
                                             disc_work.work);
+       int refcnt = atomic_read(&conn->refcnt);
 
        BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));
 
-       if (atomic_read(&conn->refcnt))
+       WARN_ON(refcnt < 0);
+
+       /* FIXME: It was observed that in a failed-pairing scenario the refcnt
+        * drops below 0. Probably this is because l2cap_conn_del calls
+        * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is
+        * dropped. After that loop hci_chan_del is called, which also drops
+        * conn. For now make sure that the ACL is alive if refcnt is higher than 0,
+        * otherwise drop it.
+        */
+       if (refcnt > 0)
                return;
 
        switch (conn->state) {
index f2829a7932e24162063596d0057b590a8e225aa2..e33a982161c1db063b5cb96f06a10babb5f0b436 100644 (file)
@@ -385,6 +385,16 @@ static const u8 gen_method[5][5] = {
        { CFM_PASSKEY, CFM_PASSKEY, REQ_PASSKEY, JUST_WORKS, OVERLAP     },
 };
 
+static u8 get_auth_method(struct smp_chan *smp, u8 local_io, u8 remote_io)
+{
+       /* If either side has unknown io_caps, use JUST WORKS */
+       if (local_io > SMP_IO_KEYBOARD_DISPLAY ||
+           remote_io > SMP_IO_KEYBOARD_DISPLAY)
+               return JUST_WORKS;
+
+       return gen_method[remote_io][local_io];
+}
+
 static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
                                                u8 local_io, u8 remote_io)
 {
@@ -401,14 +411,11 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
        BT_DBG("tk_request: auth:%d lcl:%d rem:%d", auth, local_io, remote_io);
 
        /* If neither side wants MITM, use JUST WORKS */
-       /* If either side has unknown io_caps, use JUST WORKS */
        /* Otherwise, look up method from the table */
-       if (!(auth & SMP_AUTH_MITM) ||
-           local_io > SMP_IO_KEYBOARD_DISPLAY ||
-           remote_io > SMP_IO_KEYBOARD_DISPLAY)
+       if (!(auth & SMP_AUTH_MITM))
                method = JUST_WORKS;
        else
-               method = gen_method[remote_io][local_io];
+               method = get_auth_method(smp, local_io, remote_io);
 
        /* If not bonding, don't ask user to confirm a Zero TK */
        if (!(auth & SMP_AUTH_BONDING) && method == JUST_CFM)
@@ -669,7 +676,7 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
 {
        struct smp_cmd_pairing rsp, *req = (void *) skb->data;
        struct smp_chan *smp;
-       u8 key_size, auth;
+       u8 key_size, auth, sec_level;
        int ret;
 
        BT_DBG("conn %p", conn);
@@ -695,7 +702,19 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
        /* We didn't start the pairing, so match remote */
        auth = req->auth_req;
 
-       conn->hcon->pending_sec_level = authreq_to_seclevel(auth);
+       sec_level = authreq_to_seclevel(auth);
+       if (sec_level > conn->hcon->pending_sec_level)
+               conn->hcon->pending_sec_level = sec_level;
+
+       /* If we need MITM, check that it can be achieved */
+       if (conn->hcon->pending_sec_level >= BT_SECURITY_HIGH) {
+               u8 method;
+
+               method = get_auth_method(smp, conn->hcon->io_capability,
+                                        req->io_capability);
+               if (method == JUST_WORKS || method == JUST_CFM)
+                       return SMP_AUTH_REQUIREMENTS;
+       }
 
        build_pairing_cmd(conn, req, &rsp, auth);
 
@@ -743,6 +762,16 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
        if (check_enc_key_size(conn, key_size))
                return SMP_ENC_KEY_SIZE;
 
+       /* If we need MITM, check that it can be achieved */
+       if (conn->hcon->pending_sec_level >= BT_SECURITY_HIGH) {
+               u8 method;
+
+               method = get_auth_method(smp, req->io_capability,
+                                        rsp->io_capability);
+               if (method == JUST_WORKS || method == JUST_CFM)
+                       return SMP_AUTH_REQUIREMENTS;
+       }
+
        get_random_bytes(smp->prnd, sizeof(smp->prnd));
 
        smp->prsp[0] = SMP_CMD_PAIRING_RSP;
@@ -838,6 +867,7 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
        struct smp_cmd_pairing cp;
        struct hci_conn *hcon = conn->hcon;
        struct smp_chan *smp;
+       u8 sec_level;
 
        BT_DBG("conn %p", conn);
 
@@ -847,7 +877,9 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
        if (!(conn->hcon->link_mode & HCI_LM_MASTER))
                return SMP_CMD_NOTSUPP;
 
-       hcon->pending_sec_level = authreq_to_seclevel(rp->auth_req);
+       sec_level = authreq_to_seclevel(rp->auth_req);
+       if (sec_level > hcon->pending_sec_level)
+               hcon->pending_sec_level = sec_level;
 
        if (smp_ltk_encrypt(conn, hcon->pending_sec_level))
                return 0;
@@ -901,9 +933,12 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
        if (smp_sufficient_security(hcon, sec_level))
                return 1;
 
+       if (sec_level > hcon->pending_sec_level)
+               hcon->pending_sec_level = sec_level;
+
        if (hcon->link_mode & HCI_LM_MASTER)
-               if (smp_ltk_encrypt(conn, sec_level))
-                       goto done;
+               if (smp_ltk_encrypt(conn, hcon->pending_sec_level))
+                       return 0;
 
        if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags))
                return 0;
@@ -918,7 +953,7 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
         * requires it.
         */
        if (hcon->io_capability != HCI_IO_NO_INPUT_OUTPUT ||
-           sec_level > BT_SECURITY_MEDIUM)
+           hcon->pending_sec_level > BT_SECURITY_MEDIUM)
                authreq |= SMP_AUTH_MITM;
 
        if (hcon->link_mode & HCI_LM_MASTER) {
@@ -937,9 +972,6 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
 
        set_bit(SMP_FLAG_INITIATOR, &smp->flags);
 
-done:
-       hcon->pending_sec_level = sec_level;
-
        return 0;
 }
 
index 30eedf6779138d77ae9c54ca5efa8bf57587e7ac..367a586d0c8a851ad0b5ea57fab32dfe894d662c 100644 (file)
@@ -148,6 +148,9 @@ struct list_head ptype_all __read_mostly;   /* Taps */
 static struct list_head offload_base __read_mostly;
 
 static int netif_rx_internal(struct sk_buff *skb);
+static int call_netdevice_notifiers_info(unsigned long val,
+                                        struct net_device *dev,
+                                        struct netdev_notifier_info *info);
 
 /*
  * The @dev_base_head list is protected by @dev_base_lock and the rtnl
@@ -1207,7 +1210,11 @@ EXPORT_SYMBOL(netdev_features_change);
 void netdev_state_change(struct net_device *dev)
 {
        if (dev->flags & IFF_UP) {
-               call_netdevice_notifiers(NETDEV_CHANGE, dev);
+               struct netdev_notifier_change_info change_info;
+
+               change_info.flags_changed = 0;
+               call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
+                                             &change_info.info);
                rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
        }
 }
@@ -4089,6 +4096,8 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
        skb->vlan_tci = 0;
        skb->dev = napi->dev;
        skb->skb_iif = 0;
+       skb->encapsulation = 0;
+       skb_shinfo(skb)->gso_type = 0;
        skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
 
        napi->skb = skb;
@@ -4227,9 +4236,8 @@ static int process_backlog(struct napi_struct *napi, int quota)
 #endif
        napi->weight = weight_p;
        local_irq_disable();
-       while (work < quota) {
+       while (1) {
                struct sk_buff *skb;
-               unsigned int qlen;
 
                while ((skb = __skb_dequeue(&sd->process_queue))) {
                        local_irq_enable();
@@ -4243,24 +4251,24 @@ static int process_backlog(struct napi_struct *napi, int quota)
                }
 
                rps_lock(sd);
-               qlen = skb_queue_len(&sd->input_pkt_queue);
-               if (qlen)
-                       skb_queue_splice_tail_init(&sd->input_pkt_queue,
-                                                  &sd->process_queue);
-
-               if (qlen < quota - work) {
+               if (skb_queue_empty(&sd->input_pkt_queue)) {
                        /*
                         * Inline a custom version of __napi_complete().
                         * only current cpu owns and manipulates this napi,
-                        * and NAPI_STATE_SCHED is the only possible flag set on backlog.
-                        * we can use a plain write instead of clear_bit(),
+                        * and NAPI_STATE_SCHED is the only possible flag set
+                        * on backlog.
+                        * We can use a plain write instead of clear_bit(),
                         * and we dont need an smp_mb() memory barrier.
                         */
                        list_del(&napi->poll_list);
                        napi->state = 0;
+                       rps_unlock(sd);
 
-                       quota = work + qlen;
+                       break;
                }
+
+               skb_queue_splice_tail_init(&sd->input_pkt_queue,
+                                          &sd->process_queue);
                rps_unlock(sd);
        }
        local_irq_enable();
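The rewritten process_backlog() loop above follows a drain-until-empty shape: splice the shared input queue onto a worker-private queue under the lock, process it without the lock, and only complete once the input queue is observed empty while the lock is still held. Below is a hedged userspace sketch of that shape, ignoring the per-poll quota accounting the real function keeps; the names (pkt, backlog_poll) and the pthread mutex standing in for rps_lock() are illustrative.

#include <pthread.h>
#include <stddef.h>

struct pkt { struct pkt *next; };

static pthread_mutex_t input_lock = PTHREAD_MUTEX_INITIALIZER;
static struct pkt *input_q;     /* producers append here under input_lock */
static struct pkt *process_q;   /* worker-private, no locking needed */

static void handle(struct pkt *p) { (void)p; /* deliver the packet */ }

static void backlog_poll(void)
{
        for (;;) {
                /* Drain the private queue without holding the lock. */
                while (process_q) {
                        struct pkt *p = process_q;

                        process_q = p->next;
                        handle(p);
                }

                pthread_mutex_lock(&input_lock);
                if (!input_q) {
                        /* Nothing new arrived: complete while still locked so
                         * a concurrent producer cannot slip in unnoticed. */
                        pthread_mutex_unlock(&input_lock);
                        break;
                }
                /* Splice everything that arrived onto the private queue. */
                process_q = input_q;
                input_q = NULL;
                pthread_mutex_unlock(&input_lock);
        }
}

int main(void)
{
        static struct pkt a, b;

        pthread_mutex_lock(&input_lock);        /* producer side */
        a.next = &b;
        b.next = NULL;
        input_q = &a;
        pthread_mutex_unlock(&input_lock);

        backlog_poll();                         /* drains both, then completes */
        return 0;
}
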
index 32d872eec7f5c535221898cdb45ab8f235d0b4bb..559890b0f0a2c6aaac5dc16878e2a4ff16a55933 100644 (file)
@@ -3059,11 +3059,12 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
                memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
                       sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
        } else {
+               struct neigh_table *tbl = p->tbl;
                dev_name_source = "default";
-               t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = (int *)(p + 1);
-               t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = (int *)(p + 1) + 1;
-               t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = (int *)(p + 1) + 2;
-               t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = (int *)(p + 1) + 3;
+               t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
+               t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
+               t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
+               t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
        }
 
        if (handler) {
index 9acec61f54334f146d87711f145f1992dbf2c360..dd8696a3dbec9b907cc9fa3744a8bf37e8efca93 100644 (file)
@@ -150,7 +150,7 @@ int dns_query(const char *type, const char *name, size_t namelen,
                goto put;
 
        memcpy(*_result, upayload->data, len);
-       *_result[len] = '\0';
+       (*_result)[len] = '\0';
 
        if (_expiry)
                *_expiry = rkey->expiry;
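The one-character change above is an operator-precedence fix worth spelling out: for char **_result, the expression *_result[len] parses as *(_result[len]) and indexes the pointer itself, while (*_result)[len] writes the terminating NUL into the buffer that *_result points to. A tiny standalone illustration (terminate() is a made-up helper):

#include <stddef.h>

static void terminate(char **_result, size_t len)
{
        (*_result)[len] = '\0';      /* writes into the caller's buffer */
        /* *_result[len] = '\0';        would dereference _result[len] instead */
}

int main(void)
{
        char buf[8] = "abcdefg";
        char *p = buf;

        terminate(&p, 4);            /* buf is now "abcd" */
        return 0;
}
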
index d5e6836cf772d677271c46681ffa442baa9d0c99..d156b3c5f3631f169f179bfdd534b69ce5070b82 100644 (file)
@@ -1429,6 +1429,9 @@ static int inet_gro_complete(struct sk_buff *skb, int nhoff)
        int proto = iph->protocol;
        int err = -ENOSYS;
 
+       if (skb->encapsulation)
+               skb_set_inner_network_header(skb, nhoff);
+
        csum_replace2(&iph->check, iph->tot_len, newlen);
        iph->tot_len = newlen;
 
index 4e9619bca732b869a70ac0db32c8b1285c9446fa..0485bf7f8f030d59bc6e9ee499051e99d9ab53d6 100644 (file)
@@ -68,6 +68,7 @@ void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
 
        skb_push(skb, hdr_len);
 
+       skb_reset_transport_header(skb);
        greh = (struct gre_base_hdr *)skb->data;
        greh->flags = tnl_flags_to_gre_flags(tpi->flags);
        greh->protocol = tpi->proto;
index eb92deb12666fb56b6a289c55b8205fc27117933..f0bdd47bbbcb5f420e9c3e406ca9dd1e83d0e554 100644 (file)
@@ -263,6 +263,9 @@ static int gre_gro_complete(struct sk_buff *skb, int nhoff)
        int err = -ENOENT;
        __be16 type;
 
+       skb->encapsulation = 1;
+       skb_shinfo(skb)->gso_type = SKB_GSO_GRE;
+
        type = greh->protocol;
        if (greh->flags & GRE_KEY)
                grehlen += GRE_HEADER_SECTION;
index 79c3d947a48128a8a58b58776e99f3a6602d868c..42b7bcf8045be90924d31ab26b7d4ff49b87cad9 100644 (file)
@@ -739,8 +739,6 @@ static void icmp_unreach(struct sk_buff *skb)
                                /* fall through */
                        case 0:
                                info = ntohs(icmph->un.frag.mtu);
-                               if (!info)
-                                       goto out;
                        }
                        break;
                case ICMP_SR_FAILED:
index 6748d420f714f4add6acd23a4aefd1f35c0c067b..db710b059bab35d637bae71bf813b74f58ef3b66 100644 (file)
@@ -1944,6 +1944,10 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
 
        rtnl_lock();
        in_dev = ip_mc_find_dev(net, imr);
+       if (!in_dev) {
+               ret = -ENODEV;
+               goto out;
+       }
        ifindex = imr->imr_ifindex;
        for (imlp = &inet->mc_list;
             (iml = rtnl_dereference(*imlp)) != NULL;
@@ -1961,16 +1965,14 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
 
                *imlp = iml->next_rcu;
 
-               if (in_dev)
-                       ip_mc_dec_group(in_dev, group);
+               ip_mc_dec_group(in_dev, group);
                rtnl_unlock();
                /* decrease mem now to avoid the memleak warning */
                atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
                kfree_rcu(iml, rcu);
                return 0;
        }
-       if (!in_dev)
-               ret = -ENODEV;
+out:
        rtnl_unlock();
        return ret;
 }
index 5e7aecea05cd2afbd3e3e13f417e26687517b468..ad382499bace4ce3852c2953bb262f2796d9a416 100644 (file)
@@ -288,6 +288,10 @@ int ip_options_compile(struct net *net,
                        optptr++;
                        continue;
                }
+               if (unlikely(l < 2)) {
+                       pp_ptr = optptr;
+                       goto error;
+               }
                optlen = optptr[1];
                if (optlen < 2 || optlen > l) {
                        pp_ptr = optptr;
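The new l < 2 test above guards the read of the option-length byte: every multi-byte IP option carries its length in optptr[1], so with fewer than two bytes left that access would run past the end of the options area. A hedged sketch of the same bounds discipline in a toy parser (parse_options and its option codes are illustrative, not the kernel routine):

#include <stddef.h>

/* Returns 0 on success, -1 on malformed options. */
static int parse_options(const unsigned char *optptr, size_t l)
{
        while (l > 0) {
                unsigned char opt = optptr[0];
                size_t optlen;

                if (opt == 0)                   /* end of option list */
                        break;
                if (opt == 1) {                 /* no-op, single byte */
                        l--;
                        optptr++;
                        continue;
                }
                if (l < 2)
                        return -1;              /* no room for the length byte */
                optlen = optptr[1];
                if (optlen < 2 || optlen > l)
                        return -1;              /* length field is bogus */
                /* ... handle the option body here ... */
                l -= optlen;
                optptr += optlen;
        }
        return 0;
}

int main(void)
{
        const unsigned char bad[] = { 0x01, 0x07 };     /* no-op, then a lone type byte */

        return parse_options(bad, sizeof(bad)) == -1 ? 0 : 1;
}
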
index 54b6731dab559e2c686bfd224436ff9f72d4546c..6f9de61dce5f9585625443af1d372ddb931adf03 100644 (file)
@@ -169,6 +169,7 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
 
        hlist_for_each_entry_rcu(t, head, hash_node) {
                if (remote != t->parms.iph.daddr ||
+                   t->parms.iph.saddr != 0 ||
                    !(t->dev->flags & IFF_UP))
                        continue;
 
@@ -185,10 +186,11 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
        head = &itn->tunnels[hash];
 
        hlist_for_each_entry_rcu(t, head, hash_node) {
-               if ((local != t->parms.iph.saddr &&
-                    (local != t->parms.iph.daddr ||
-                     !ipv4_is_multicast(local))) ||
-                   !(t->dev->flags & IFF_UP))
+               if ((local != t->parms.iph.saddr || t->parms.iph.daddr != 0) &&
+                   (local != t->parms.iph.daddr || !ipv4_is_multicast(local)))
+                       continue;
+
+               if (!(t->dev->flags & IFF_UP))
                        continue;
 
                if (!ip_tunnel_key_match(&t->parms, flags, key))
@@ -205,6 +207,8 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
 
        hlist_for_each_entry_rcu(t, head, hash_node) {
                if (t->parms.i_key != key ||
+                   t->parms.iph.saddr != 0 ||
+                   t->parms.iph.daddr != 0 ||
                    !(t->dev->flags & IFF_UP))
                        continue;
 
index 082239ffe34a1f42e62e9ac7901f9fe5219fdca6..3162ea923dedba7021cd67d24d721f94bf2342d0 100644 (file)
@@ -1010,7 +1010,7 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
        const struct iphdr *iph = (const struct iphdr *) skb->data;
        struct flowi4 fl4;
        struct rtable *rt;
-       struct dst_entry *dst;
+       struct dst_entry *odst = NULL;
        bool new = false;
 
        bh_lock_sock(sk);
@@ -1018,16 +1018,17 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
        if (!ip_sk_accept_pmtu(sk))
                goto out;
 
-       rt = (struct rtable *) __sk_dst_get(sk);
+       odst = sk_dst_get(sk);
 
-       if (sock_owned_by_user(sk) || !rt) {
+       if (sock_owned_by_user(sk) || !odst) {
                __ipv4_sk_update_pmtu(skb, sk, mtu);
                goto out;
        }
 
        __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
 
-       if (!__sk_dst_check(sk, 0)) {
+       rt = (struct rtable *)odst;
+       if (odst->obsolete && odst->ops->check(odst, 0) == NULL) {
                rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
                if (IS_ERR(rt))
                        goto out;
@@ -1037,8 +1038,7 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
 
        __ip_rt_update_pmtu((struct rtable *) rt->dst.path, &fl4, mtu);
 
-       dst = dst_check(&rt->dst, 0);
-       if (!dst) {
+       if (!dst_check(&rt->dst, 0)) {
                if (new)
                        dst_release(&rt->dst);
 
@@ -1050,10 +1050,11 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
        }
 
        if (new)
-               __sk_dst_set(sk, &rt->dst);
+               sk_dst_set(sk, &rt->dst);
 
 out:
        bh_unlock_sock(sk);
+       dst_release(odst);
 }
 EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
 
index eb1dde37e678f6bb1570bfb452c019b88eb1a76c..9d2118e5fbc79359e205c41b4802402d79b580f6 100644 (file)
@@ -1108,7 +1108,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        if (unlikely(tp->repair)) {
                if (tp->repair_queue == TCP_RECV_QUEUE) {
                        copied = tcp_send_rcvq(sk, msg, size);
-                       goto out;
+                       goto out_nopush;
                }
 
                err = -EINVAL;
@@ -1282,6 +1282,7 @@ wait_for_memory:
 out:
        if (copied)
                tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
+out_nopush:
        release_sock(sk);
        return copied + copied_syn;
 
index b5c23756965ae338d1dfed57ca44be700fd2f148..40639c288dc229d205eccb257886d8867d973759 100644 (file)
@@ -1106,7 +1106,7 @@ static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
        }
 
        /* D-SACK for already forgotten data... Do dumb counting. */
-       if (dup_sack && tp->undo_marker && tp->undo_retrans &&
+       if (dup_sack && tp->undo_marker && tp->undo_retrans > 0 &&
            !after(end_seq_0, prior_snd_una) &&
            after(end_seq_0, tp->undo_marker))
                tp->undo_retrans--;
@@ -1187,7 +1187,7 @@ static u8 tcp_sacktag_one(struct sock *sk,
 
        /* Account D-SACK for retransmitted packet. */
        if (dup_sack && (sacked & TCPCB_RETRANS)) {
-               if (tp->undo_marker && tp->undo_retrans &&
+               if (tp->undo_marker && tp->undo_retrans > 0 &&
                    after(end_seq, tp->undo_marker))
                        tp->undo_retrans--;
                if (sacked & TCPCB_SACKED_ACKED)
@@ -1893,7 +1893,7 @@ static void tcp_clear_retrans_partial(struct tcp_sock *tp)
        tp->lost_out = 0;
 
        tp->undo_marker = 0;
-       tp->undo_retrans = 0;
+       tp->undo_retrans = -1;
 }
 
 void tcp_clear_retrans(struct tcp_sock *tp)
@@ -2665,7 +2665,7 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
 
        tp->prior_ssthresh = 0;
        tp->undo_marker = tp->snd_una;
-       tp->undo_retrans = tp->retrans_out;
+       tp->undo_retrans = tp->retrans_out ? : -1;
 
        if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
                if (!ece_ack)
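A short aside on the ternary form used above: "x ? : y" is a GNU C extension that evaluates x once and yields it when nonzero, otherwise y. Here it lets -1 serve as a "nothing counted yet" sentinel, distinct from a genuine count of 0, which is what the undo_retrans > 0 checks earlier in this file rely on. A tiny illustration (needs GCC or Clang; init_undo_retrans is a made-up name):

#include <assert.h>

static int init_undo_retrans(int retrans_out)
{
        return retrans_out ? : -1;   /* same as: retrans_out ? retrans_out : -1 */
}

int main(void)
{
        assert(init_undo_retrans(5) == 5);
        assert(init_undo_retrans(0) == -1);  /* sentinel, not a zero count */
        return 0;
}
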
index 4e86c59ec7f7f07fe06c6db20d17851da6f1563f..55046ecd083ea8afb964205eb8ea38e2bbe91708 100644 (file)
@@ -309,7 +309,7 @@ static int tcp4_gro_complete(struct sk_buff *skb, int thoff)
 
        th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
                                  iph->daddr, 0);
-       skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+       skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
 
        return tcp_gro_complete(skb);
 }
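The switch from "=" to "|=" above matters because gro_complete handlers run as a chain: an outer tunnel handler may already have set SKB_GSO_GRE (as the gre_offload hunk earlier does), and a plain assignment by the inner TCP handler would silently discard it. A minimal illustration with hypothetical flag values:

#include <assert.h>

#define GSO_GRE   0x1
#define GSO_TCPV4 0x2

int main(void)
{
        unsigned int gso_type = GSO_GRE;   /* set by the encapsulation layer */

        gso_type |= GSO_TCPV4;             /* inner handler adds its own bit */
        assert(gso_type == (GSO_GRE | GSO_TCPV4));
        return 0;
}
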
index d92bce0ea24ec54fb559941979906337d68881ad..179b51e6bda339f37a386d5118e1f15f41bbccf7 100644 (file)
@@ -2525,8 +2525,6 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
                if (!tp->retrans_stamp)
                        tp->retrans_stamp = TCP_SKB_CB(skb)->when;
 
-               tp->undo_retrans += tcp_skb_pcount(skb);
-
                /* snd_nxt is stored to detect loss of retransmitted segment,
                 * see tcp_input.c tcp_sacktag_write_queue().
                 */
@@ -2534,6 +2532,10 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
        } else if (err != -EBUSY) {
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
        }
+
+       if (tp->undo_retrans < 0)
+               tp->undo_retrans = 0;
+       tp->undo_retrans += tcp_skb_pcount(skb);
        return err;
 }
 
index d92f94b7e4025dd4779e75e6a75f2de560713778..7d5a8661df769d95e05c8214ecd5afc8f0144d26 100644 (file)
@@ -1588,8 +1588,11 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
                goto csum_error;
 
 
-       if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf))
+       if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
+               UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
+                                is_udplite);
                goto drop;
+       }
 
        rc = 0;
 
index 08b367c6b9cfe2cb268cf7ec603ecc8f5c588a86..617f0958e164e7893ca70e80e09d1ed933c95b69 100644 (file)
@@ -1301,8 +1301,17 @@ int igmp6_event_query(struct sk_buff *skb)
        len = ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr);
        len -= skb_network_header_len(skb);
 
-       /* Drop queries with not link local source */
-       if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL))
+       /* RFC3810 6.2
+        * Upon reception of an MLD message that contains a Query, the node
+        * checks if the source address of the message is a valid link-local
+        * address, if the Hop Limit is set to 1, and if the Router Alert
+        * option is present in the Hop-By-Hop Options header of the IPv6
+        * packet.  If any of these checks fails, the packet is dropped.
+        */
+       if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL) ||
+           ipv6_hdr(skb)->hop_limit != 1 ||
+           !(IP6CB(skb)->flags & IP6SKB_ROUTERALERT) ||
+           IP6CB(skb)->ra != htons(IPV6_OPT_ROUTERALERT_MLD))
                return -EINVAL;
 
        idev = __in6_dev_get(skb->dev);
index 8517d3cd1aed460bbfb1bfb0f515924f008b790d..01b0ff9a0c2c00d6734537254e2150edcbcb64d7 100644 (file)
@@ -73,7 +73,7 @@ static int tcp6_gro_complete(struct sk_buff *skb, int thoff)
 
        th->check = ~tcp_v6_check(skb->len - thoff, &iph->saddr,
                                  &iph->daddr, 0);
-       skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+       skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6;
 
        return tcp_gro_complete(skb);
 }
index 95c8347992882e5cbef7719b0ae940fc659af057..7092ff78fd8498e1cf84a20091b92d6c867a64e6 100644 (file)
@@ -674,8 +674,11 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
                        goto csum_error;
        }
 
-       if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf))
+       if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
+               UDP6_INC_STATS_BH(sock_net(sk),
+                                 UDP_MIB_RCVBUFERRORS, is_udplite);
                goto drop;
+       }
 
        skb_dst_drop(skb);
 
@@ -690,6 +693,7 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
        bh_unlock_sock(sk);
 
        return rc;
+
 csum_error:
        UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
 drop:
index 950909f04ee6ab598a0bdd16f8d6ad6ec2a931ed..13752d96275e8b9142539a201ea1ac6f45f883ba 100644 (file)
@@ -1365,7 +1365,7 @@ static int pppol2tp_setsockopt(struct socket *sock, int level, int optname,
        int err;
 
        if (level != SOL_PPPOL2TP)
-               return udp_prot.setsockopt(sk, level, optname, optval, optlen);
+               return -EINVAL;
 
        if (optlen < sizeof(int))
                return -EINVAL;
@@ -1491,7 +1491,7 @@ static int pppol2tp_getsockopt(struct socket *sock, int level, int optname,
        struct pppol2tp_session *ps;
 
        if (level != SOL_PPPOL2TP)
-               return udp_prot.getsockopt(sk, level, optname, optval, optlen);
+               return -EINVAL;
 
        if (get_user(len, optlen))
                return -EFAULT;
index 6886601afe1c731c3cc7b5409745307b2f48e67c..a6cda52ed9203e55047841f1b4a62ab301ecb26e 100644 (file)
@@ -1096,11 +1096,12 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
        int err;
 
        /* 24 + 6 = header + auth_algo + auth_transaction + status_code */
-       skb = dev_alloc_skb(local->hw.extra_tx_headroom + 24 + 6 + extra_len);
+       skb = dev_alloc_skb(local->hw.extra_tx_headroom + IEEE80211_WEP_IV_LEN +
+                           24 + 6 + extra_len + IEEE80211_WEP_ICV_LEN);
        if (!skb)
                return;
 
-       skb_reserve(skb, local->hw.extra_tx_headroom);
+       skb_reserve(skb, local->hw.extra_tx_headroom + IEEE80211_WEP_IV_LEN);
 
        mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24 + 6);
        memset(mgmt, 0, 24 + 6);
index ab4566cfcbe497beea641dff934e24dc3341096b..8746ff9a83571d97b3991dced7ad3f968c15bdfa 100644 (file)
@@ -35,7 +35,7 @@ int nft_register_afinfo(struct net *net, struct nft_af_info *afi)
 {
        INIT_LIST_HEAD(&afi->tables);
        nfnl_lock(NFNL_SUBSYS_NFTABLES);
-       list_add_tail(&afi->list, &net->nft.af_info);
+       list_add_tail_rcu(&afi->list, &net->nft.af_info);
        nfnl_unlock(NFNL_SUBSYS_NFTABLES);
        return 0;
 }
@@ -51,7 +51,7 @@ EXPORT_SYMBOL_GPL(nft_register_afinfo);
 void nft_unregister_afinfo(struct nft_af_info *afi)
 {
        nfnl_lock(NFNL_SUBSYS_NFTABLES);
-       list_del(&afi->list);
+       list_del_rcu(&afi->list);
        nfnl_unlock(NFNL_SUBSYS_NFTABLES);
 }
 EXPORT_SYMBOL_GPL(nft_unregister_afinfo);
@@ -277,11 +277,14 @@ static int nf_tables_dump_tables(struct sk_buff *skb,
        struct net *net = sock_net(skb->sk);
        int family = nfmsg->nfgen_family;
 
-       list_for_each_entry(afi, &net->nft.af_info, list) {
+       rcu_read_lock();
+       cb->seq = net->nft.base_seq;
+
+       list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
                if (family != NFPROTO_UNSPEC && family != afi->family)
                        continue;
 
-               list_for_each_entry(table, &afi->tables, list) {
+               list_for_each_entry_rcu(table, &afi->tables, list) {
                        if (idx < s_idx)
                                goto cont;
                        if (idx > s_idx)
@@ -294,11 +297,14 @@ static int nf_tables_dump_tables(struct sk_buff *skb,
                                                      NLM_F_MULTI,
                                                      afi->family, table) < 0)
                                goto done;
+
+                       nl_dump_check_consistent(cb, nlmsg_hdr(skb));
 cont:
                        idx++;
                }
        }
 done:
+       rcu_read_unlock();
        cb->args[0] = idx;
        return skb->len;
 }
@@ -407,6 +413,9 @@ static int nf_tables_updtable(struct nft_ctx *ctx)
        if (flags & ~NFT_TABLE_F_DORMANT)
                return -EINVAL;
 
+       if (flags == ctx->table->flags)
+               return 0;
+
        trans = nft_trans_alloc(ctx, NFT_MSG_NEWTABLE,
                                sizeof(struct nft_trans_table));
        if (trans == NULL)
@@ -514,7 +523,7 @@ static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb,
                module_put(afi->owner);
                return err;
        }
-       list_add_tail(&table->list, &afi->tables);
+       list_add_tail_rcu(&table->list, &afi->tables);
        return 0;
 }
 
@@ -546,7 +555,7 @@ static int nf_tables_deltable(struct sock *nlsk, struct sk_buff *skb,
        if (err < 0)
                return err;
 
-       list_del(&table->list);
+       list_del_rcu(&table->list);
        return 0;
 }
 
@@ -635,13 +644,20 @@ static int nft_dump_stats(struct sk_buff *skb, struct nft_stats __percpu *stats)
 {
        struct nft_stats *cpu_stats, total;
        struct nlattr *nest;
+       unsigned int seq;
+       u64 pkts, bytes;
        int cpu;
 
        memset(&total, 0, sizeof(total));
        for_each_possible_cpu(cpu) {
                cpu_stats = per_cpu_ptr(stats, cpu);
-               total.pkts += cpu_stats->pkts;
-               total.bytes += cpu_stats->bytes;
+               do {
+                       seq = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
+                       pkts = cpu_stats->pkts;
+                       bytes = cpu_stats->bytes;
+               } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, seq));
+               total.pkts += pkts;
+               total.bytes += bytes;
        }
        nest = nla_nest_start(skb, NFTA_CHAIN_COUNTERS);
        if (nest == NULL)
@@ -761,12 +777,15 @@ static int nf_tables_dump_chains(struct sk_buff *skb,
        struct net *net = sock_net(skb->sk);
        int family = nfmsg->nfgen_family;
 
-       list_for_each_entry(afi, &net->nft.af_info, list) {
+       rcu_read_lock();
+       cb->seq = net->nft.base_seq;
+
+       list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
                if (family != NFPROTO_UNSPEC && family != afi->family)
                        continue;
 
-               list_for_each_entry(table, &afi->tables, list) {
-                       list_for_each_entry(chain, &table->chains, list) {
+               list_for_each_entry_rcu(table, &afi->tables, list) {
+                       list_for_each_entry_rcu(chain, &table->chains, list) {
                                if (idx < s_idx)
                                        goto cont;
                                if (idx > s_idx)
@@ -778,17 +797,19 @@ static int nf_tables_dump_chains(struct sk_buff *skb,
                                                              NLM_F_MULTI,
                                                              afi->family, table, chain) < 0)
                                        goto done;
+
+                               nl_dump_check_consistent(cb, nlmsg_hdr(skb));
 cont:
                                idx++;
                        }
                }
        }
 done:
+       rcu_read_unlock();
        cb->args[0] = idx;
        return skb->len;
 }
 
-
 static int nf_tables_getchain(struct sock *nlsk, struct sk_buff *skb,
                              const struct nlmsghdr *nlh,
                              const struct nlattr * const nla[])
@@ -861,7 +882,7 @@ static struct nft_stats __percpu *nft_stats_alloc(const struct nlattr *attr)
        if (!tb[NFTA_COUNTER_BYTES] || !tb[NFTA_COUNTER_PACKETS])
                return ERR_PTR(-EINVAL);
 
-       newstats = alloc_percpu(struct nft_stats);
+       newstats = netdev_alloc_pcpu_stats(struct nft_stats);
        if (newstats == NULL)
                return ERR_PTR(-ENOMEM);
 
@@ -1077,7 +1098,7 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
                        }
                        basechain->stats = stats;
                } else {
-                       stats = alloc_percpu(struct nft_stats);
+                       stats = netdev_alloc_pcpu_stats(struct nft_stats);
                        if (IS_ERR(stats)) {
                                module_put(type->owner);
                                kfree(basechain);
@@ -1130,7 +1151,7 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
                goto err2;
 
        table->use++;
-       list_add_tail(&chain->list, &table->chains);
+       list_add_tail_rcu(&chain->list, &table->chains);
        return 0;
 err2:
        if (!(table->flags & NFT_TABLE_F_DORMANT) &&
@@ -1180,7 +1201,7 @@ static int nf_tables_delchain(struct sock *nlsk, struct sk_buff *skb,
                return err;
 
        table->use--;
-       list_del(&chain->list);
+       list_del_rcu(&chain->list);
        return 0;
 }
 
@@ -1199,9 +1220,9 @@ int nft_register_expr(struct nft_expr_type *type)
 {
        nfnl_lock(NFNL_SUBSYS_NFTABLES);
        if (type->family == NFPROTO_UNSPEC)
-               list_add_tail(&type->list, &nf_tables_expressions);
+               list_add_tail_rcu(&type->list, &nf_tables_expressions);
        else
-               list_add(&type->list, &nf_tables_expressions);
+               list_add_rcu(&type->list, &nf_tables_expressions);
        nfnl_unlock(NFNL_SUBSYS_NFTABLES);
        return 0;
 }
@@ -1216,7 +1237,7 @@ EXPORT_SYMBOL_GPL(nft_register_expr);
 void nft_unregister_expr(struct nft_expr_type *type)
 {
        nfnl_lock(NFNL_SUBSYS_NFTABLES);
-       list_del(&type->list);
+       list_del_rcu(&type->list);
        nfnl_unlock(NFNL_SUBSYS_NFTABLES);
 }
 EXPORT_SYMBOL_GPL(nft_unregister_expr);
@@ -1549,16 +1570,17 @@ static int nf_tables_dump_rules(struct sk_buff *skb,
        unsigned int idx = 0, s_idx = cb->args[0];
        struct net *net = sock_net(skb->sk);
        int family = nfmsg->nfgen_family;
-       u8 genctr = ACCESS_ONCE(net->nft.genctr);
-       u8 gencursor = ACCESS_ONCE(net->nft.gencursor);
 
-       list_for_each_entry(afi, &net->nft.af_info, list) {
+       rcu_read_lock();
+       cb->seq = net->nft.base_seq;
+
+       list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
                if (family != NFPROTO_UNSPEC && family != afi->family)
                        continue;
 
-               list_for_each_entry(table, &afi->tables, list) {
-                       list_for_each_entry(chain, &table->chains, list) {
-                               list_for_each_entry(rule, &chain->rules, list) {
+               list_for_each_entry_rcu(table, &afi->tables, list) {
+                       list_for_each_entry_rcu(chain, &table->chains, list) {
+                               list_for_each_entry_rcu(rule, &chain->rules, list) {
                                        if (!nft_rule_is_active(net, rule))
                                                goto cont;
                                        if (idx < s_idx)
@@ -1572,6 +1594,8 @@ static int nf_tables_dump_rules(struct sk_buff *skb,
                                                                      NLM_F_MULTI | NLM_F_APPEND,
                                                                      afi->family, table, chain, rule) < 0)
                                                goto done;
+
+                                       nl_dump_check_consistent(cb, nlmsg_hdr(skb));
 cont:
                                        idx++;
                                }
@@ -1579,9 +1603,7 @@ cont:
                }
        }
 done:
-       /* Invalidate this dump, a transition to the new generation happened */
-       if (gencursor != net->nft.gencursor || genctr != net->nft.genctr)
-               return -EBUSY;
+       rcu_read_unlock();
 
        cb->args[0] = idx;
        return skb->len;
@@ -1932,7 +1954,7 @@ static LIST_HEAD(nf_tables_set_ops);
 int nft_register_set(struct nft_set_ops *ops)
 {
        nfnl_lock(NFNL_SUBSYS_NFTABLES);
-       list_add_tail(&ops->list, &nf_tables_set_ops);
+       list_add_tail_rcu(&ops->list, &nf_tables_set_ops);
        nfnl_unlock(NFNL_SUBSYS_NFTABLES);
        return 0;
 }
@@ -1941,7 +1963,7 @@ EXPORT_SYMBOL_GPL(nft_register_set);
 void nft_unregister_set(struct nft_set_ops *ops)
 {
        nfnl_lock(NFNL_SUBSYS_NFTABLES);
-       list_del(&ops->list);
+       list_del_rcu(&ops->list);
        nfnl_unlock(NFNL_SUBSYS_NFTABLES);
 }
 EXPORT_SYMBOL_GPL(nft_unregister_set);
@@ -2234,7 +2256,10 @@ static int nf_tables_dump_sets_table(struct nft_ctx *ctx, struct sk_buff *skb,
        if (cb->args[1])
                return skb->len;
 
-       list_for_each_entry(set, &ctx->table->sets, list) {
+       rcu_read_lock();
+       cb->seq = ctx->net->nft.base_seq;
+
+       list_for_each_entry_rcu(set, &ctx->table->sets, list) {
                if (idx < s_idx)
                        goto cont;
                if (nf_tables_fill_set(skb, ctx, set, NFT_MSG_NEWSET,
@@ -2242,11 +2267,13 @@ static int nf_tables_dump_sets_table(struct nft_ctx *ctx, struct sk_buff *skb,
                        cb->args[0] = idx;
                        goto done;
                }
+               nl_dump_check_consistent(cb, nlmsg_hdr(skb));
 cont:
                idx++;
        }
        cb->args[1] = 1;
 done:
+       rcu_read_unlock();
        return skb->len;
 }
 
@@ -2260,7 +2287,10 @@ static int nf_tables_dump_sets_family(struct nft_ctx *ctx, struct sk_buff *skb,
        if (cb->args[1])
                return skb->len;
 
-       list_for_each_entry(table, &ctx->afi->tables, list) {
+       rcu_read_lock();
+       cb->seq = ctx->net->nft.base_seq;
+
+       list_for_each_entry_rcu(table, &ctx->afi->tables, list) {
                if (cur_table) {
                        if (cur_table != table)
                                continue;
@@ -2269,7 +2299,7 @@ static int nf_tables_dump_sets_family(struct nft_ctx *ctx, struct sk_buff *skb,
                }
                ctx->table = table;
                idx = 0;
-               list_for_each_entry(set, &ctx->table->sets, list) {
+               list_for_each_entry_rcu(set, &ctx->table->sets, list) {
                        if (idx < s_idx)
                                goto cont;
                        if (nf_tables_fill_set(skb, ctx, set, NFT_MSG_NEWSET,
@@ -2278,12 +2308,14 @@ static int nf_tables_dump_sets_family(struct nft_ctx *ctx, struct sk_buff *skb,
                                cb->args[2] = (unsigned long) table;
                                goto done;
                        }
+                       nl_dump_check_consistent(cb, nlmsg_hdr(skb));
 cont:
                        idx++;
                }
        }
        cb->args[1] = 1;
 done:
+       rcu_read_unlock();
        return skb->len;
 }
 
@@ -2300,7 +2332,10 @@ static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
        if (cb->args[1])
                return skb->len;
 
-       list_for_each_entry(afi, &net->nft.af_info, list) {
+       rcu_read_lock();
+       cb->seq = net->nft.base_seq;
+
+       list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
                if (cur_family) {
                        if (afi->family != cur_family)
                                continue;
@@ -2308,7 +2343,7 @@ static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
                        cur_family = 0;
                }
 
-               list_for_each_entry(table, &afi->tables, list) {
+               list_for_each_entry_rcu(table, &afi->tables, list) {
                        if (cur_table) {
                                if (cur_table != table)
                                        continue;
@@ -2319,7 +2354,7 @@ static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
                        ctx->table = table;
                        ctx->afi = afi;
                        idx = 0;
-                       list_for_each_entry(set, &ctx->table->sets, list) {
+                       list_for_each_entry_rcu(set, &ctx->table->sets, list) {
                                if (idx < s_idx)
                                        goto cont;
                                if (nf_tables_fill_set(skb, ctx, set,
@@ -2330,6 +2365,7 @@ static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
                                        cb->args[3] = afi->family;
                                        goto done;
                                }
+                               nl_dump_check_consistent(cb, nlmsg_hdr(skb));
 cont:
                                idx++;
                        }
@@ -2339,6 +2375,7 @@ cont:
        }
        cb->args[1] = 1;
 done:
+       rcu_read_unlock();
        return skb->len;
 }
 
@@ -2597,7 +2634,7 @@ static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb,
        if (err < 0)
                goto err2;
 
-       list_add_tail(&set->list, &table->sets);
+       list_add_tail_rcu(&set->list, &table->sets);
        table->use++;
        return 0;
 
@@ -2617,7 +2654,7 @@ static void nft_set_destroy(struct nft_set *set)
 
 static void nf_tables_set_destroy(const struct nft_ctx *ctx, struct nft_set *set)
 {
-       list_del(&set->list);
+       list_del_rcu(&set->list);
        nf_tables_set_notify(ctx, set, NFT_MSG_DELSET, GFP_ATOMIC);
        nft_set_destroy(set);
 }
@@ -2652,7 +2689,7 @@ static int nf_tables_delset(struct sock *nlsk, struct sk_buff *skb,
        if (err < 0)
                return err;
 
-       list_del(&set->list);
+       list_del_rcu(&set->list);
        ctx.table->use--;
        return 0;
 }
@@ -2704,14 +2741,14 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
        }
 bind:
        binding->chain = ctx->chain;
-       list_add_tail(&binding->list, &set->bindings);
+       list_add_tail_rcu(&binding->list, &set->bindings);
        return 0;
 }
 
 void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
                          struct nft_set_binding *binding)
 {
-       list_del(&binding->list);
+       list_del_rcu(&binding->list);
 
        if (list_empty(&set->bindings) && set->flags & NFT_SET_ANONYMOUS &&
            !(set->flags & NFT_SET_INACTIVE))
@@ -3346,7 +3383,7 @@ static int nf_tables_commit(struct sk_buff *skb)
        struct nft_set *set;
 
        /* Bump generation counter, invalidate any dump in progress */
-       net->nft.genctr++;
+       while (++net->nft.base_seq == 0);
 
        /* A new generation has just started */
        net->nft.gencursor = gencursor_next(net);
@@ -3491,12 +3528,12 @@ static int nf_tables_abort(struct sk_buff *skb)
                                }
                                nft_trans_destroy(trans);
                        } else {
-                               list_del(&trans->ctx.table->list);
+                               list_del_rcu(&trans->ctx.table->list);
                        }
                        break;
                case NFT_MSG_DELTABLE:
-                       list_add_tail(&trans->ctx.table->list,
-                                     &trans->ctx.afi->tables);
+                       list_add_tail_rcu(&trans->ctx.table->list,
+                                         &trans->ctx.afi->tables);
                        nft_trans_destroy(trans);
                        break;
                case NFT_MSG_NEWCHAIN:
@@ -3507,7 +3544,7 @@ static int nf_tables_abort(struct sk_buff *skb)
                                nft_trans_destroy(trans);
                        } else {
                                trans->ctx.table->use--;
-                               list_del(&trans->ctx.chain->list);
+                               list_del_rcu(&trans->ctx.chain->list);
                                if (!(trans->ctx.table->flags & NFT_TABLE_F_DORMANT) &&
                                    trans->ctx.chain->flags & NFT_BASE_CHAIN) {
                                        nf_unregister_hooks(nft_base_chain(trans->ctx.chain)->ops,
@@ -3517,8 +3554,8 @@ static int nf_tables_abort(struct sk_buff *skb)
                        break;
                case NFT_MSG_DELCHAIN:
                        trans->ctx.table->use++;
-                       list_add_tail(&trans->ctx.chain->list,
-                                     &trans->ctx.table->chains);
+                       list_add_tail_rcu(&trans->ctx.chain->list,
+                                         &trans->ctx.table->chains);
                        nft_trans_destroy(trans);
                        break;
                case NFT_MSG_NEWRULE:
@@ -3532,12 +3569,12 @@ static int nf_tables_abort(struct sk_buff *skb)
                        break;
                case NFT_MSG_NEWSET:
                        trans->ctx.table->use--;
-                       list_del(&nft_trans_set(trans)->list);
+                       list_del_rcu(&nft_trans_set(trans)->list);
                        break;
                case NFT_MSG_DELSET:
                        trans->ctx.table->use++;
-                       list_add_tail(&nft_trans_set(trans)->list,
-                                     &trans->ctx.table->sets);
+                       list_add_tail_rcu(&nft_trans_set(trans)->list,
+                                         &trans->ctx.table->sets);
                        nft_trans_destroy(trans);
                        break;
                case NFT_MSG_NEWSETELEM:
@@ -3951,6 +3988,7 @@ static int nf_tables_init_net(struct net *net)
 {
        INIT_LIST_HEAD(&net->nft.af_info);
        INIT_LIST_HEAD(&net->nft.commit_list);
+       net->nft.base_seq = 1;
        return 0;
 }
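Two related changes in this file fit together: nf_tables_commit() now advances the generation with "while (++net->nft.base_seq == 0);" and nf_tables_init_net() starts it at 1, so a sequence value of 0, which would look like an unset sequence to the netlink dump-consistency helpers, never occurs even after the counter wraps. A tiny illustration of the skip-zero-on-wrap idiom (names are hypothetical):

#include <assert.h>

static unsigned int base_seq = ~0u;      /* about to wrap */

static unsigned int bump_generation(void)
{
        while (++base_seq == 0)
                ;                        /* 0 is reserved, skip it on wrap */
        return base_seq;
}

int main(void)
{
        assert(bump_generation() == 1);  /* ~0u wraps to 0, which is skipped */
        return 0;
}
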
 
index 345acfb1720b14f00aae0e5937ab07bfb90e9482..3b90eb2b2c55453e989c891a3f815be6e1da22d1 100644 (file)
@@ -109,7 +109,7 @@ nft_do_chain(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops)
        struct nft_data data[NFT_REG_MAX + 1];
        unsigned int stackptr = 0;
        struct nft_jumpstack jumpstack[NFT_JUMP_STACK_SIZE];
-       struct nft_stats __percpu *stats;
+       struct nft_stats *stats;
        int rulenum;
        /*
         * Cache cursor to avoid problems in case that the cursor is updated
@@ -205,9 +205,11 @@ next_rule:
                nft_trace_packet(pkt, basechain, -1, NFT_TRACE_POLICY);
 
        rcu_read_lock_bh();
-       stats = rcu_dereference(nft_base_chain(basechain)->stats);
-       __this_cpu_inc(stats->pkts);
-       __this_cpu_add(stats->bytes, pkt->skb->len);
+       stats = this_cpu_ptr(rcu_dereference(nft_base_chain(basechain)->stats));
+       u64_stats_update_begin(&stats->syncp);
+       stats->pkts++;
+       stats->bytes += pkt->skb->len;
+       u64_stats_update_end(&stats->syncp);
        rcu_read_unlock_bh();
 
        return nft_base_chain(basechain)->policy;
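The two nf_tables hunks above form a writer/reader pair: nft_do_chain() brackets its per-cpu counter updates with u64_stats_update_begin()/end(), and nft_dump_stats() earlier retries its reads with u64_stats_fetch_begin_irq()/retry_irq(), so 64-bit counters stay consistent even on 32-bit machines where they cannot be read atomically. A hedged, simplified userspace sketch of the same sequence-counter idea follows; struct stats and the memory ordering are illustrative only, and the real kernel primitives add the proper barriers and per-cpu handling.

#include <stdatomic.h>
#include <stdint.h>

struct stats {
        atomic_uint seq;        /* odd while an update is in progress */
        uint64_t pkts;
        uint64_t bytes;
};

static void stats_add(struct stats *s, uint64_t len)
{
        atomic_fetch_add_explicit(&s->seq, 1, memory_order_release);
        s->pkts++;
        s->bytes += len;
        atomic_fetch_add_explicit(&s->seq, 1, memory_order_release);
}

static void stats_read(struct stats *s, uint64_t *pkts, uint64_t *bytes)
{
        unsigned int start;

        do {
                while ((start = atomic_load_explicit(&s->seq,
                                                     memory_order_acquire)) & 1)
                        ;                       /* writer in progress, wait */
                *pkts = s->pkts;
                *bytes = s->bytes;
        } while (atomic_load_explicit(&s->seq, memory_order_acquire) != start);
}

int main(void)
{
        struct stats s = { 0 };
        uint64_t p, b;

        stats_add(&s, 1500);
        stats_read(&s, &p, &b);
        return (p == 1 && b == 1500) ? 0 : 1;
}
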
index 15c731f03fa664a64f7bf3cdde36cf1a8e4150b6..e6fac7e3db52e5fcb40629a60472ff2c7aa72dcb 100644 (file)
@@ -636,7 +636,7 @@ static unsigned int netlink_poll(struct file *file, struct socket *sock,
                while (nlk->cb_running && netlink_dump_space(nlk)) {
                        err = netlink_dump(sk);
                        if (err < 0) {
-                               sk->sk_err = err;
+                               sk->sk_err = -err;
                                sk->sk_error_report(sk);
                                break;
                        }
@@ -2483,7 +2483,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
            atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
                ret = netlink_dump(sk);
                if (ret) {
-                       sk->sk_err = ret;
+                       sk->sk_err = -ret;
                        sk->sk_error_report(sk);
                }
        }
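The sign flip above follows the usual kernel convention: helpers such as netlink_dump() return a negative errno, while sk->sk_err (like userspace errno) stores the positive code. A tiny illustration:

#include <assert.h>
#include <errno.h>

int main(void)
{
        int ret = -ENOBUFS;              /* what a failing helper returns */
        int sk_err = -ret;               /* what gets reported on the socket */

        assert(sk_err == ENOBUFS);
        return 0;
}
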
index c36856a457ca963c735e89e36478a53ba60bb453..e70d8b18e96290af1435ef8423c9d71e5496df2d 100644 (file)
@@ -551,6 +551,8 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
 
                case OVS_ACTION_ATTR_SAMPLE:
                        err = sample(dp, skb, a);
+                       if (unlikely(err)) /* skb already freed. */
+                               return err;
                        break;
                }
 
index 0d407bca81e3573983bc47791dddf5561d4f8ee1..9db4bf6740d1e1dc08c60200f1fe8f82f4a370d5 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007-2013 Nicira, Inc.
+ * Copyright (c) 2007-2014 Nicira, Inc.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of version 2 of the GNU General Public
@@ -276,7 +276,7 @@ void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
        OVS_CB(skb)->flow = flow;
        OVS_CB(skb)->pkt_key = &key;
 
-       ovs_flow_stats_update(OVS_CB(skb)->flow, skb);
+       ovs_flow_stats_update(OVS_CB(skb)->flow, key.tp.flags, skb);
        ovs_execute_actions(dp, skb);
        stats_counter = &stats->n_hit;
 
@@ -889,8 +889,11 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
                }
                /* The unmasked key has to be the same for flow updates. */
                if (unlikely(!ovs_flow_cmp_unmasked_key(flow, &match))) {
-                       error = -EEXIST;
-                       goto err_unlock_ovs;
+                       flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
+                       if (!flow) {
+                               error = -ENOENT;
+                               goto err_unlock_ovs;
+                       }
                }
                /* Update actions. */
                old_acts = ovsl_dereference(flow->sf_acts);
@@ -981,16 +984,12 @@ static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
                goto err_unlock_ovs;
        }
        /* Check that the flow exists. */
-       flow = ovs_flow_tbl_lookup(&dp->table, &key);
+       flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
        if (unlikely(!flow)) {
                error = -ENOENT;
                goto err_unlock_ovs;
        }
-       /* The unmasked key has to be the same for flow updates. */
-       if (unlikely(!ovs_flow_cmp_unmasked_key(flow, &match))) {
-               error = -EEXIST;
-               goto err_unlock_ovs;
-       }
+
        /* Update actions, if present. */
        if (likely(acts)) {
                old_acts = ovsl_dereference(flow->sf_acts);
@@ -1063,8 +1062,8 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
                goto unlock;
        }
 
-       flow = ovs_flow_tbl_lookup(&dp->table, &key);
-       if (!flow || !ovs_flow_cmp_unmasked_key(flow, &match)) {
+       flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
+       if (!flow) {
                err = -ENOENT;
                goto unlock;
        }
@@ -1113,8 +1112,8 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
                goto unlock;
        }
 
-       flow = ovs_flow_tbl_lookup(&dp->table, &key);
-       if (unlikely(!flow || !ovs_flow_cmp_unmasked_key(flow, &match))) {
+       flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
+       if (unlikely(!flow)) {
                err = -ENOENT;
                goto unlock;
        }
index 334751cb15289c4f0ca00bedec960ee1cfe19ba0..d07ab538fc9d37b78082e88906fe41c433467166 100644 (file)
@@ -61,10 +61,10 @@ u64 ovs_flow_used_time(unsigned long flow_jiffies)
 
 #define TCP_FLAGS_BE16(tp) (*(__be16 *)&tcp_flag_word(tp) & htons(0x0FFF))
 
-void ovs_flow_stats_update(struct sw_flow *flow, struct sk_buff *skb)
+void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
+                          struct sk_buff *skb)
 {
        struct flow_stats *stats;
-       __be16 tcp_flags = flow->key.tp.flags;
        int node = numa_node_id();
 
        stats = rcu_dereference(flow->stats[node]);
index ac395d2cd821631898116b89438af7eba5d40b2f..5e5aaed3a85b0761a4831894e0c2645961790f8c 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007-2013 Nicira, Inc.
+ * Copyright (c) 2007-2014 Nicira, Inc.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of version 2 of the GNU General Public
@@ -180,7 +180,8 @@ struct arp_eth_header {
        unsigned char       ar_tip[4];          /* target IP address        */
 } __packed;
 
-void ovs_flow_stats_update(struct sw_flow *, struct sk_buff *);
+void ovs_flow_stats_update(struct sw_flow *, __be16 tcp_flags,
+                          struct sk_buff *);
 void ovs_flow_stats_get(const struct sw_flow *, struct ovs_flow_stats *,
                        unsigned long *used, __be16 *tcp_flags);
 void ovs_flow_stats_clear(struct sw_flow *);
index 574c3abc9b307ef6609f8f8dc09ade4b3253b814..cf2d853646f05dc61a1c8f9093782196860ab9bc 100644 (file)
@@ -456,6 +456,22 @@ struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
        return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit);
 }
 
+struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
+                                         struct sw_flow_match *match)
+{
+       struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
+       struct sw_flow_mask *mask;
+       struct sw_flow *flow;
+
+       /* Always called under ovs-mutex. */
+       list_for_each_entry(mask, &tbl->mask_list, list) {
+               flow = masked_flow_lookup(ti, match->key, mask);
+               if (flow && ovs_flow_cmp_unmasked_key(flow, match))  /* Found */
+                       return flow;
+       }
+       return NULL;
+}
+
 int ovs_flow_tbl_num_masks(const struct flow_table *table)
 {
        struct sw_flow_mask *mask;
index ca8a5820f6153f67fb9ad987c4c156be882c92ea..5918bff7f3f6cfee2fd58bcd15101446b44469d8 100644 (file)
@@ -76,7 +76,8 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *,
                                    u32 *n_mask_hit);
 struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *,
                                    const struct sw_flow_key *);
-
+struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
+                                         struct sw_flow_match *match);
 bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
                               struct sw_flow_match *match);
 
index 35ec4fed09e228c7e6fe889d2701cb3a3de7748a..f49148a07da29037c0799ab122b18b1d4d0599cd 100644 (file)
@@ -110,6 +110,22 @@ static int gre_rcv(struct sk_buff *skb,
        return PACKET_RCVD;
 }
 
+/* Called with rcu_read_lock and BH disabled. */
+static int gre_err(struct sk_buff *skb, u32 info,
+                  const struct tnl_ptk_info *tpi)
+{
+       struct ovs_net *ovs_net;
+       struct vport *vport;
+
+       ovs_net = net_generic(dev_net(skb->dev), ovs_net_id);
+       vport = rcu_dereference(ovs_net->vport_net.gre_vport);
+
+       if (unlikely(!vport))
+               return PACKET_REJECT;
+       else
+               return PACKET_RCVD;
+}
+
 static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
 {
        struct net *net = ovs_dp_get_net(vport->dp);
@@ -186,6 +202,7 @@ error:
 
 static struct gre_cisco_protocol gre_protocol = {
        .handler        = gre_rcv,
+       .err_handler    = gre_err,
        .priority       = 1,
 };
 
index c39b583ace3229d4bae6a7b3774593e5eebd7141..70c0be8d0121db461c1e21793a1b68d844f37e8b 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/errno.h>
 #include <linux/rtnetlink.h>
 #include <linux/skbuff.h>
+#include <linux/bitmap.h>
 #include <net/netlink.h>
 #include <net/act_api.h>
 #include <net/pkt_cls.h>
@@ -460,17 +461,25 @@ static int u32_delete(struct tcf_proto *tp, unsigned long arg)
        return 0;
 }
 
+#define NR_U32_NODE (1<<12)
 static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle)
 {
        struct tc_u_knode *n;
-       unsigned int i = 0x7FF;
+       unsigned long i;
+       unsigned long *bitmap = kzalloc(BITS_TO_LONGS(NR_U32_NODE) * sizeof(unsigned long),
+                                       GFP_KERNEL);
+       if (!bitmap)
+               return handle | 0xFFF;
 
        for (n = ht->ht[TC_U32_HASH(handle)]; n; n = n->next)
-               if (i < TC_U32_NODE(n->handle))
-                       i = TC_U32_NODE(n->handle);
-       i++;
+               set_bit(TC_U32_NODE(n->handle), bitmap);
 
-       return handle | (i > 0xFFF ? 0xFFF : i);
+       i = find_next_zero_bit(bitmap, NR_U32_NODE, 0x800);
+       if (i >= NR_U32_NODE)
+               i = find_next_zero_bit(bitmap, NR_U32_NODE, 1);
+
+       kfree(bitmap);
+       return handle | (i >= NR_U32_NODE ? 0xFFF : i);
 }
 
 static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
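The rewritten gen_new_kid() above switches from "highest used id + 1" to a bitmap scan: mark every handle already present in the hash bucket, then take the first free slot at or above 0x800, falling back to the low range before giving up with 0xFFF. A hedged userspace sketch of that allocation strategy (new_node_id, set_bit_ul and find_free are made-up stand-ins for the kernel bitmap helpers):

#include <limits.h>
#include <stddef.h>

#define NR_NODE (1 << 12)
#define ULONG_BITS (sizeof(unsigned long) * CHAR_BIT)

static void set_bit_ul(unsigned long *map, unsigned int n)
{
        map[n / ULONG_BITS] |= 1UL << (n % ULONG_BITS);
}

static unsigned int find_free(const unsigned long *map, unsigned int from)
{
        for (unsigned int i = from; i < NR_NODE; i++)
                if (!((map[i / ULONG_BITS] >> (i % ULONG_BITS)) & 1))
                        return i;
        return NR_NODE;                          /* nothing free in this range */
}

/* used[] holds node ids already allocated; returns a free id, or 0xFFF. */
static unsigned int new_node_id(const unsigned int *used, size_t n)
{
        unsigned long map[NR_NODE / ULONG_BITS] = { 0 };
        unsigned int i;

        for (size_t k = 0; k < n; k++)
                set_bit_ul(map, used[k] & (NR_NODE - 1));

        i = find_free(map, 0x800);
        if (i >= NR_NODE)
                i = find_free(map, 1);           /* 0 stays reserved */
        return i >= NR_NODE ? 0xFFF : i;
}

int main(void)
{
        unsigned int used[] = { 0x800, 0x801 };

        return new_node_id(used, 2) == 0x802 ? 0 : 1;
}
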
index 85c64658bd0b183df5c7a7fd8394df757cb0b4b0..b6842fdb53d4b09ffdafec78c2bab535e6eaad2d 100644 (file)
@@ -366,9 +366,10 @@ fail:
  * specification [SCTP] and any extensions for a list of possible
  * error formats.
  */
-struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
-       const struct sctp_association *asoc, struct sctp_chunk *chunk,
-       __u16 flags, gfp_t gfp)
+struct sctp_ulpevent *
+sctp_ulpevent_make_remote_error(const struct sctp_association *asoc,
+                               struct sctp_chunk *chunk, __u16 flags,
+                               gfp_t gfp)
 {
        struct sctp_ulpevent *event;
        struct sctp_remote_error *sre;
@@ -387,8 +388,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
        /* Copy the skb to a new skb with room for us to prepend
         * notification with.
         */
-       skb = skb_copy_expand(chunk->skb, sizeof(struct sctp_remote_error),
-                             0, gfp);
+       skb = skb_copy_expand(chunk->skb, sizeof(*sre), 0, gfp);
 
        /* Pull off the rest of the cause TLV from the chunk.  */
        skb_pull(chunk->skb, elen);
@@ -399,62 +399,21 @@ struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
        event = sctp_skb2event(skb);
        sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize);
 
-       sre = (struct sctp_remote_error *)
-               skb_push(skb, sizeof(struct sctp_remote_error));
+       sre = (struct sctp_remote_error *) skb_push(skb, sizeof(*sre));
 
        /* Trim the buffer to the right length.  */
-       skb_trim(skb, sizeof(struct sctp_remote_error) + elen);
+       skb_trim(skb, sizeof(*sre) + elen);
 
-       /* Socket Extensions for SCTP
-        * 5.3.1.3 SCTP_REMOTE_ERROR
-        *
-        * sre_type:
-        *   It should be SCTP_REMOTE_ERROR.
-        */
+       /* RFC6458, Section 6.1.3. SCTP_REMOTE_ERROR */
+       memset(sre, 0, sizeof(*sre));
        sre->sre_type = SCTP_REMOTE_ERROR;
-
-       /*
-        * Socket Extensions for SCTP
-        * 5.3.1.3 SCTP_REMOTE_ERROR
-        *
-        * sre_flags: 16 bits (unsigned integer)
-        *   Currently unused.
-        */
        sre->sre_flags = 0;
-
-       /* Socket Extensions for SCTP
-        * 5.3.1.3 SCTP_REMOTE_ERROR
-        *
-        * sre_length: sizeof (__u32)
-        *
-        * This field is the total length of the notification data,
-        * including the notification header.
-        */
        sre->sre_length = skb->len;
-
-       /* Socket Extensions for SCTP
-        * 5.3.1.3 SCTP_REMOTE_ERROR
-        *
-        * sre_error: 16 bits (unsigned integer)
-        * This value represents one of the Operational Error causes defined in
-        * the SCTP specification, in network byte order.
-        */
        sre->sre_error = cause;
-
-       /* Socket Extensions for SCTP
-        * 5.3.1.3 SCTP_REMOTE_ERROR
-        *
-        * sre_assoc_id: sizeof (sctp_assoc_t)
-        *
-        * The association id field, holds the identifier for the association.
-        * All notifications for a given association have the same association
-        * identifier.  For TCP style socket, this field is ignored.
-        */
        sctp_ulpevent_set_owner(event, asoc);
        sre->sre_assoc_id = sctp_assoc2id(asoc);
 
        return event;
-
 fail:
        return NULL;
 }
@@ -899,7 +858,9 @@ __u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event)
        return notification->sn_header.sn_type;
 }
 
-/* Copy out the sndrcvinfo into a msghdr.  */
+/* RFC6458, Section 5.3.2. SCTP Header Information Structure
+ * (SCTP_SNDRCV, DEPRECATED)
+ */
 void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
                                   struct msghdr *msghdr)
 {
@@ -908,74 +869,21 @@ void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
        if (sctp_ulpevent_is_notification(event))
                return;
 
-       /* Sockets API Extensions for SCTP
-        * Section 5.2.2 SCTP Header Information Structure (SCTP_SNDRCV)
-        *
-        * sinfo_stream: 16 bits (unsigned integer)
-        *
-        * For recvmsg() the SCTP stack places the message's stream number in
-        * this value.
-       */
+       memset(&sinfo, 0, sizeof(sinfo));
        sinfo.sinfo_stream = event->stream;
-       /* sinfo_ssn: 16 bits (unsigned integer)
-        *
-        * For recvmsg() this value contains the stream sequence number that
-        * the remote endpoint placed in the DATA chunk.  For fragmented
-        * messages this is the same number for all deliveries of the message
-        * (if more than one recvmsg() is needed to read the message).
-        */
        sinfo.sinfo_ssn = event->ssn;
-       /* sinfo_ppid: 32 bits (unsigned integer)
-        *
-        * In recvmsg() this value is
-        * the same information that was passed by the upper layer in the peer
-        * application.  Please note that byte order issues are NOT accounted
-        * for and this information is passed opaquely by the SCTP stack from
-        * one end to the other.
-        */
        sinfo.sinfo_ppid = event->ppid;
-       /* sinfo_flags: 16 bits (unsigned integer)
-        *
-        * This field may contain any of the following flags and is composed of
-        * a bitwise OR of these values.
-        *
-        * recvmsg() flags:
-        *
-        * SCTP_UNORDERED - This flag is present when the message was sent
-        *                 non-ordered.
-        */
        sinfo.sinfo_flags = event->flags;
-       /* sinfo_tsn: 32 bit (unsigned integer)
-        *
-        * For the receiving side, this field holds a TSN that was
-        * assigned to one of the SCTP Data Chunks.
-        */
        sinfo.sinfo_tsn = event->tsn;
-       /* sinfo_cumtsn: 32 bit (unsigned integer)
-        *
-        * This field will hold the current cumulative TSN as
-        * known by the underlying SCTP layer.  Note this field is
-        * ignored when sending and only valid for a receive
-        * operation when sinfo_flags are set to SCTP_UNORDERED.
-        */
        sinfo.sinfo_cumtsn = event->cumtsn;
-       /* sinfo_assoc_id: sizeof (sctp_assoc_t)
-        *
-        * The association handle field, sinfo_assoc_id, holds the identifier
-        * for the association announced in the COMMUNICATION_UP notification.
-        * All notifications for a given association have the same identifier.
-        * Ignored for one-to-one style sockets.
-        */
        sinfo.sinfo_assoc_id = sctp_assoc2id(event->asoc);
-
-       /* context value that is set via SCTP_CONTEXT socket option. */
+       /* Context value that is set via SCTP_CONTEXT socket option. */
        sinfo.sinfo_context = event->asoc->default_rcv_context;
-
        /* These fields are not used while receiving. */
        sinfo.sinfo_timetolive = 0;
 
        put_cmsg(msghdr, IPPROTO_SCTP, SCTP_SNDRCV,
-                sizeof(struct sctp_sndrcvinfo), (void *)&sinfo);
+                sizeof(sinfo), &sinfo);
 }
 
 /* Do accounting for bytes received and hold a reference to the association
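
On the userspace side, the sndrcvinfo filled in above is delivered as an SCTP_SNDRCV control message. A minimal receive sketch, assuming the lksctp headers are available and the sctp_sndrcvinfo data_io event has been enabled on the socket; the helper name is illustrative and error handling is trimmed:

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

/* Returns 0 and fills *sinfo when an SCTP_SNDRCV cmsg arrived with the data */
static int recv_with_sndrcvinfo(int fd, void *data, size_t len,
                                struct sctp_sndrcvinfo *sinfo)
{
        char cbuf[CMSG_SPACE(sizeof(*sinfo))];
        struct iovec iov = { .iov_base = data, .iov_len = len };
        struct msghdr msg = {
                .msg_iov = &iov, .msg_iovlen = 1,
                .msg_control = cbuf, .msg_controllen = sizeof(cbuf),
        };
        struct cmsghdr *cmsg;

        if (recvmsg(fd, &msg, 0) <= 0)
                return -1;

        for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
                if (cmsg->cmsg_level == IPPROTO_SCTP &&
                    cmsg->cmsg_type == SCTP_SNDRCV) {
                        memcpy(sinfo, CMSG_DATA(cmsg), sizeof(*sinfo));
                        return 0;       /* sinfo_stream, sinfo_ppid, ... are now valid */
                }
        }
        return -1;                      /* no sndrcvinfo attached to this message */
}
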
index 26631679a1faa4bb7b8c720dda1e48b5c68ab40e..55c6c9d3e1ceee905bd25ce09d4c8bd7c41a3412 100644 (file)
@@ -559,6 +559,7 @@ receive:
 
                buf = node->bclink.deferred_head;
                node->bclink.deferred_head = buf->next;
+               buf->next = NULL;
                node->bclink.deferred_size--;
                goto receive;
        }
index 8be6e94a1ca9790dbbde757b6bd70fe9c5abb428..0a37a472c29f9a6b51eaa00cda09526418e5d6ed 100644 (file)
@@ -101,9 +101,11 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
 }
 
 /* tipc_buf_append(): Append a buffer to the fragment list of another buffer
- * Let first buffer become head buffer
- * Returns 1 and sets *buf to headbuf if chain is complete, otherwise 0
- * Leaves headbuf pointer at NULL if failure
+ * @*headbuf: in:  NULL for first frag, otherwise value returned from prev call
+ *            out: set when successful non-complete reassembly, otherwise NULL
+ * @*buf:     in:  the buffer to append. Always defined
+ *            out: head buf after successful complete reassembly, otherwise NULL
+ * Returns 1 when reassembly complete, otherwise 0
  */
 int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
 {
@@ -122,6 +124,7 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
                        goto out_free;
                head = *headbuf = frag;
                skb_frag_list_init(head);
+               *buf = NULL;
                return 0;
        }
        if (!head)
@@ -150,5 +153,7 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
 out_free:
        pr_warn_ratelimited("Unable to build fragment list\n");
        kfree_skb(*buf);
+       kfree_skb(*headbuf);
+       *buf = *headbuf = NULL;
        return 0;
 }
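
A hypothetical caller sketch of the contract documented above: *headbuf starts out NULL, each incoming fragment is handed in through *buf, and a return of 1 means *buf now points at the fully reassembled message. This is kernel-context pseudo-code; next_fragment() and deliver() are stand-ins, not TIPC functions.

/* Hypothetical receive loop; next_fragment() and deliver() are stand-ins */
static void reassembly_loop(void)
{
        struct sk_buff *head = NULL;            /* NULL before the first fragment */
        struct sk_buff *frag;

        while ((frag = next_fragment()) != NULL) {
                if (tipc_buf_append(&head, &frag)) {
                        deliver(frag);          /* frag is now the complete message */
                        head = NULL;            /* start a fresh chain */
                        continue;
                }
                /* returned 0: fragment consumed (head set) or chain dropped (head == NULL) */
        }
}
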
index e9afbf10e756bd3a1ec81a6b6b7aa229d0688be7..7e3a3cef7df93b4c6936515f05f91d2ec14446ea 100644 (file)
@@ -424,7 +424,7 @@ static inline unsigned int elapsed_jiffies_msecs(unsigned long start)
        if (end >= start)
                return jiffies_to_msecs(end - start);
 
-       return jiffies_to_msecs(end + (MAX_JIFFY_OFFSET - start) + 1);
+       return jiffies_to_msecs(end + (ULONG_MAX - start) + 1);
 }
 
 void
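
The change above matters when jiffies has wrapped between start and end; the expression relies on unsigned modular arithmetic, as in this small standalone sketch (values illustrative):

#include <limits.h>
#include <stdio.h>

int main(void)
{
        unsigned long start = ULONG_MAX - 5;    /* sampled just before the counter wraps */
        unsigned long end = 10;                 /* sampled after the wrap */

        /* end + (ULONG_MAX - start) + 1 counts the ticks across the wrap */
        unsigned long elapsed = end + (ULONG_MAX - start) + 1;

        printf("%lu\n", elapsed);               /* prints 16 */
        return 0;
}
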
index ba4f1723c83ad2eb094c15a8794fedb7b6f7a404..6668daf6932667bee1f80f6d4c7bdcefef36346c 100644 (file)
@@ -1497,18 +1497,17 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
                }
                CMD(start_p2p_device, START_P2P_DEVICE);
                CMD(set_mcast_rate, SET_MCAST_RATE);
+#ifdef CONFIG_NL80211_TESTMODE
+               CMD(testmode_cmd, TESTMODE);
+#endif
                if (state->split) {
                        CMD(crit_proto_start, CRIT_PROTOCOL_START);
                        CMD(crit_proto_stop, CRIT_PROTOCOL_STOP);
                        if (rdev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH)
                                CMD(channel_switch, CHANNEL_SWITCH);
+                       CMD(set_qos_map, SET_QOS_MAP);
                }
-               CMD(set_qos_map, SET_QOS_MAP);
-
-#ifdef CONFIG_NL80211_TESTMODE
-               CMD(testmode_cmd, TESTMODE);
-#endif
-
+               /* add into the if now */
 #undef CMD
 
                if (rdev->ops->connect || rdev->ops->auth) {
index 558b0e3a02d8284c49de58d14833c13b444db5a2..1afdf45db38f216bb750a905dcdb5a85ae7d0897 100644 (file)
@@ -935,7 +935,7 @@ freq_reg_info_regd(struct wiphy *wiphy, u32 center_freq,
                if (!band_rule_found)
                        band_rule_found = freq_in_rule_band(fr, center_freq);
 
-               bw_fits = reg_does_bw_fit(fr, center_freq, MHZ_TO_KHZ(5));
+               bw_fits = reg_does_bw_fit(fr, center_freq, MHZ_TO_KHZ(20));
 
                if (band_rule_found && bw_fits)
                        return rr;
@@ -1019,10 +1019,10 @@ static void chan_reg_rule_print_dbg(const struct ieee80211_regdomain *regd,
 }
 #endif
 
-/* Find an ieee80211_reg_rule such that a 5MHz channel with frequency
- * chan->center_freq fits there.
- * If there is no such reg_rule, disable the channel, otherwise set the
- * flags corresponding to the bandwidths allowed in the particular reg_rule
+/*
+ * Note that right now we assume the desired channel bandwidth
+ * is always 20 MHz for each individual channel (HT40 uses 20 MHz
+ * per channel, the primary and the extension channel).
  */
 static void handle_channel(struct wiphy *wiphy,
                           enum nl80211_reg_initiator initiator,
@@ -1083,12 +1083,8 @@ static void handle_channel(struct wiphy *wiphy,
        if (reg_rule->flags & NL80211_RRF_AUTO_BW)
                max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule);
 
-       if (max_bandwidth_khz < MHZ_TO_KHZ(10))
-               bw_flags = IEEE80211_CHAN_NO_10MHZ;
-       if (max_bandwidth_khz < MHZ_TO_KHZ(20))
-               bw_flags |= IEEE80211_CHAN_NO_20MHZ;
        if (max_bandwidth_khz < MHZ_TO_KHZ(40))
-               bw_flags |= IEEE80211_CHAN_NO_HT40;
+               bw_flags = IEEE80211_CHAN_NO_HT40;
        if (max_bandwidth_khz < MHZ_TO_KHZ(80))
                bw_flags |= IEEE80211_CHAN_NO_80MHZ;
        if (max_bandwidth_khz < MHZ_TO_KHZ(160))
@@ -1522,12 +1518,8 @@ static void handle_channel_custom(struct wiphy *wiphy,
        if (reg_rule->flags & NL80211_RRF_AUTO_BW)
                max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule);
 
-       if (max_bandwidth_khz < MHZ_TO_KHZ(10))
-               bw_flags = IEEE80211_CHAN_NO_10MHZ;
-       if (max_bandwidth_khz < MHZ_TO_KHZ(20))
-               bw_flags |= IEEE80211_CHAN_NO_20MHZ;
        if (max_bandwidth_khz < MHZ_TO_KHZ(40))
-               bw_flags |= IEEE80211_CHAN_NO_HT40;
+               bw_flags = IEEE80211_CHAN_NO_HT40;
        if (max_bandwidth_khz < MHZ_TO_KHZ(80))
                bw_flags |= IEEE80211_CHAN_NO_80MHZ;
        if (max_bandwidth_khz < MHZ_TO_KHZ(160))
index 6af50eb80ea7517680dbb7b18981eb9906fde5ae..70faa3a325264a68511a2edca0efa225ebe40942 100644 (file)
@@ -379,11 +379,11 @@ static int special_clk_ctl_put(struct snd_kcontrol *kctl,
        struct special_params *params = bebob->maudio_special_quirk;
        int err, id;
 
-       mutex_lock(&bebob->mutex);
-
        id = uval->value.enumerated.item[0];
        if (id >= ARRAY_SIZE(special_clk_labels))
-               return 0;
+               return -EINVAL;
+
+       mutex_lock(&bebob->mutex);
 
        err = avc_maudio_set_special_clk(bebob, id,
                                         params->dig_in_fmt,
@@ -391,7 +391,10 @@ static int special_clk_ctl_put(struct snd_kcontrol *kctl,
                                         params->clk_lock);
        mutex_unlock(&bebob->mutex);
 
-       return err >= 0;
+       if (err >= 0)
+               err = 1;
+
+       return err;
 }
 static struct snd_kcontrol_new special_clk_ctl = {
        .name   = "Clock Source",
@@ -434,8 +437,8 @@ static struct snd_kcontrol_new special_sync_ctl = {
        .get    = special_sync_ctl_get,
 };
 
-/* Digital interface control for special firmware */
-static char *const special_dig_iface_labels[] = {
+/* Digital input interface control for special firmware */
+static char *const special_dig_in_iface_labels[] = {
        "S/PDIF Optical", "S/PDIF Coaxial", "ADAT Optical"
 };
 static int special_dig_in_iface_ctl_info(struct snd_kcontrol *kctl,
@@ -443,13 +446,13 @@ static int special_dig_in_iface_ctl_info(struct snd_kcontrol *kctl,
 {
        einf->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
        einf->count = 1;
-       einf->value.enumerated.items = ARRAY_SIZE(special_dig_iface_labels);
+       einf->value.enumerated.items = ARRAY_SIZE(special_dig_in_iface_labels);
 
        if (einf->value.enumerated.item >= einf->value.enumerated.items)
                einf->value.enumerated.item = einf->value.enumerated.items - 1;
 
        strcpy(einf->value.enumerated.name,
-              special_dig_iface_labels[einf->value.enumerated.item]);
+              special_dig_in_iface_labels[einf->value.enumerated.item]);
 
        return 0;
 }
@@ -491,26 +494,36 @@ static int special_dig_in_iface_ctl_set(struct snd_kcontrol *kctl,
        unsigned int id, dig_in_fmt, dig_in_iface;
        int err;
 
-       mutex_lock(&bebob->mutex);
-
        id = uval->value.enumerated.item[0];
+       if (id >= ARRAY_SIZE(special_dig_in_iface_labels))
+               return -EINVAL;
 
        /* decode user value */
        dig_in_fmt = (id >> 1) & 0x01;
        dig_in_iface = id & 0x01;
 
+       mutex_lock(&bebob->mutex);
+
        err = avc_maudio_set_special_clk(bebob,
                                         params->clk_src,
                                         dig_in_fmt,
                                         params->dig_out_fmt,
                                         params->clk_lock);
-       if ((err < 0) || (params->dig_in_fmt > 0)) /* ADAT */
+       if (err < 0)
+               goto end;
+
+       /* For ADAT, only the optical interface is available. */
+       if (params->dig_in_fmt > 0) {
+               err = 1;
                goto end;
+       }
 
+       /* For S/PDIF, optical/coaxial interfaces are selectable. */
        err = avc_audio_set_selector(bebob->unit, 0x00, 0x04, dig_in_iface);
        if (err < 0)
                dev_err(&bebob->unit->device,
                        "fail to set digital input interface: %d\n", err);
+       err = 1;
 end:
        special_stream_formation_set(bebob);
        mutex_unlock(&bebob->mutex);
@@ -525,18 +538,22 @@ static struct snd_kcontrol_new special_dig_in_iface_ctl = {
        .put    = special_dig_in_iface_ctl_set
 };
 
+/* Digital output interface control for special firmware */
+static char *const special_dig_out_iface_labels[] = {
+       "S/PDIF Optical and Coaxial", "ADAT Optical"
+};
 static int special_dig_out_iface_ctl_info(struct snd_kcontrol *kctl,
                                          struct snd_ctl_elem_info *einf)
 {
        einf->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
        einf->count = 1;
-       einf->value.enumerated.items = ARRAY_SIZE(special_dig_iface_labels) - 1;
+       einf->value.enumerated.items = ARRAY_SIZE(special_dig_out_iface_labels);
 
        if (einf->value.enumerated.item >= einf->value.enumerated.items)
                einf->value.enumerated.item = einf->value.enumerated.items - 1;
 
        strcpy(einf->value.enumerated.name,
-              special_dig_iface_labels[einf->value.enumerated.item + 1]);
+              special_dig_out_iface_labels[einf->value.enumerated.item]);
 
        return 0;
 }
@@ -558,16 +575,20 @@ static int special_dig_out_iface_ctl_set(struct snd_kcontrol *kctl,
        unsigned int id;
        int err;
 
-       mutex_lock(&bebob->mutex);
-
        id = uval->value.enumerated.item[0];
+       if (id >= ARRAY_SIZE(special_dig_out_iface_labels))
+               return -EINVAL;
+
+       mutex_lock(&bebob->mutex);
 
        err = avc_maudio_set_special_clk(bebob,
                                         params->clk_src,
                                         params->dig_in_fmt,
                                         id, params->clk_lock);
-       if (err >= 0)
+       if (err >= 0) {
                special_stream_formation_set(bebob);
+               err = 1;
+       }
 
        mutex_unlock(&bebob->mutex);
        return err;
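
These changes follow the ALSA control .put convention: validate the user value before taking any locks, return 1 when the value actually changed, and return a negative errno on failure. A minimal sketch of the pattern; the device type, item count, and apply helper are hypothetical, not part of the bebob driver:

#include <linux/mutex.h>
#include <sound/control.h>

#define EXAMPLE_NR_ITEMS 3                      /* hypothetical enum size */

struct example_dev {                            /* hypothetical driver state */
        struct mutex mutex;
};

static int example_apply_setting(struct example_dev *dev, unsigned int id);

static int example_enum_put(struct snd_kcontrol *kctl,
                            struct snd_ctl_elem_value *uval)
{
        struct example_dev *dev = snd_kcontrol_chip(kctl);
        unsigned int id = uval->value.enumerated.item[0];
        int err;

        if (id >= EXAMPLE_NR_ITEMS)             /* validate before locking */
                return -EINVAL;

        mutex_lock(&dev->mutex);
        err = example_apply_setting(dev, id);   /* hypothetical hardware update */
        mutex_unlock(&dev->mutex);

        return err < 0 ? err : 1;               /* 1: value changed */
}
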
index 480bbddbd801bf002e4cc43fb8c7c0f762ec40c8..6df04d91c93cd051af2d8b9e25a31706cdf9e2b7 100644 (file)
@@ -193,7 +193,8 @@ azx_assign_device(struct azx *chip, struct snd_pcm_substream *substream)
                                dsp_unlock(azx_dev);
                                return azx_dev;
                        }
-                       if (!res)
+                       if (!res ||
+                           (chip->driver_caps & AZX_DCAPS_REVERSE_ASSIGN))
                                res = azx_dev;
                }
                dsp_unlock(azx_dev);
index b6b4e71a0b0bdfb7dbeab0c1b57662a447ca8f90..83cd19017cf38aeaab7d25d76eb0e9106ec84681 100644 (file)
@@ -227,7 +227,7 @@ enum {
 /* quirks for Intel PCH */
 #define AZX_DCAPS_INTEL_PCH_NOPM \
        (AZX_DCAPS_SCH_SNOOP | AZX_DCAPS_BUFSIZE | \
-        AZX_DCAPS_COUNT_LPIB_DELAY)
+        AZX_DCAPS_COUNT_LPIB_DELAY | AZX_DCAPS_REVERSE_ASSIGN)
 
 #define AZX_DCAPS_INTEL_PCH \
        (AZX_DCAPS_INTEL_PCH_NOPM | AZX_DCAPS_PM_RUNTIME)
@@ -596,7 +596,7 @@ static int azx_suspend(struct device *dev)
        struct azx *chip = card->private_data;
        struct azx_pcm *p;
 
-       if (chip->disabled)
+       if (chip->disabled || chip->init_failed)
                return 0;
 
        snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
@@ -628,7 +628,7 @@ static int azx_resume(struct device *dev)
        struct snd_card *card = dev_get_drvdata(dev);
        struct azx *chip = card->private_data;
 
-       if (chip->disabled)
+       if (chip->disabled || chip->init_failed)
                return 0;
 
        if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
@@ -665,7 +665,7 @@ static int azx_runtime_suspend(struct device *dev)
        struct snd_card *card = dev_get_drvdata(dev);
        struct azx *chip = card->private_data;
 
-       if (chip->disabled)
+       if (chip->disabled || chip->init_failed)
                return 0;
 
        if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
@@ -692,7 +692,7 @@ static int azx_runtime_resume(struct device *dev)
        struct hda_codec *codec;
        int status;
 
-       if (chip->disabled)
+       if (chip->disabled || chip->init_failed)
                return 0;
 
        if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
@@ -729,7 +729,7 @@ static int azx_runtime_idle(struct device *dev)
        struct snd_card *card = dev_get_drvdata(dev);
        struct azx *chip = card->private_data;
 
-       if (chip->disabled)
+       if (chip->disabled || chip->init_failed)
                return 0;
 
        if (!power_save_controller ||
index 4a7cb01fa91226b2cfd3a4a582d02d9899ffa6e0..e9d1a5762a55be0a0278b0a91f33a534526103fa 100644 (file)
@@ -186,6 +186,7 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
 #define AZX_DCAPS_BUFSIZE      (1 << 21)       /* no buffer size alignment */
 #define AZX_DCAPS_ALIGN_BUFSIZE        (1 << 22)       /* buffer size alignment */
 #define AZX_DCAPS_4K_BDLE_BOUNDARY (1 << 23)   /* BDLE in 4k boundary */
+#define AZX_DCAPS_REVERSE_ASSIGN (1 << 24)     /* Assign devices in reverse order */
 #define AZX_DCAPS_COUNT_LPIB_DELAY  (1 << 25)  /* Take LPIB as delay */
 #define AZX_DCAPS_PM_RUNTIME   (1 << 26)       /* runtime PM support */
 #define AZX_DCAPS_I915_POWERWELL (1 << 27)     /* HSW i915 powerwell support */
index a366ba9293a8103cc5d63139ab281773a2c66812..358414da641839a9f2ce38163161e44b6b10520b 100644 (file)
@@ -236,6 +236,7 @@ disable_hda:
        return rc;
 }
 
+#ifdef CONFIG_PM_SLEEP
 static void hda_tegra_disable_clocks(struct hda_tegra *data)
 {
        clk_disable_unprepare(data->hda2hdmi_clk);
@@ -243,7 +244,6 @@ static void hda_tegra_disable_clocks(struct hda_tegra *data)
        clk_disable_unprepare(data->hda_clk);
 }
 
-#ifdef CONFIG_PM_SLEEP
 /*
  * power management
  */
index 4fe876b65fdaab4e71dbf965cd6fb1be90fa2229..ba4ca52072ff7528ccee3afe66c9beac347f71d0 100644 (file)
@@ -3337,6 +3337,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
 { .id = 0x10de0051, .name = "GPU 51 HDMI/DP",  .patch = patch_nvhdmi },
 { .id = 0x10de0060, .name = "GPU 60 HDMI/DP",  .patch = patch_nvhdmi },
 { .id = 0x10de0067, .name = "MCP67 HDMI",      .patch = patch_nvhdmi_2ch },
+{ .id = 0x10de0070, .name = "GPU 70 HDMI/DP",  .patch = patch_nvhdmi },
 { .id = 0x10de0071, .name = "GPU 71 HDMI/DP",  .patch = patch_nvhdmi },
 { .id = 0x10de8001, .name = "MCP73 HDMI",      .patch = patch_nvhdmi_2ch },
 { .id = 0x11069f80, .name = "VX900 HDMI/DP",   .patch = patch_via_hdmi },
@@ -3394,6 +3395,7 @@ MODULE_ALIAS("snd-hda-codec-id:10de0044");
 MODULE_ALIAS("snd-hda-codec-id:10de0051");
 MODULE_ALIAS("snd-hda-codec-id:10de0060");
 MODULE_ALIAS("snd-hda-codec-id:10de0067");
+MODULE_ALIAS("snd-hda-codec-id:10de0070");
 MODULE_ALIAS("snd-hda-codec-id:10de0071");
 MODULE_ALIAS("snd-hda-codec-id:10de8001");
 MODULE_ALIAS("snd-hda-codec-id:11069f80");
index c342f7087147e320a5d928272bd4757ffa051612..ee53a42818caee935457a64066a863b5b5477dc6 100644 (file)
@@ -35,7 +35,7 @@ static inline int __mutex_init(liblockdep_pthread_mutex_t *lock,
 
 static inline int liblockdep_pthread_mutex_lock(liblockdep_pthread_mutex_t *lock)
 {
-       lock_acquire(&lock->dep_map, 0, 0, 0, 2, NULL, (unsigned long)_RET_IP_);
+       lock_acquire(&lock->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
        return pthread_mutex_lock(&lock->mutex);
 }
 
@@ -47,7 +47,7 @@ static inline int liblockdep_pthread_mutex_unlock(liblockdep_pthread_mutex_t *lo
 
 static inline int liblockdep_pthread_mutex_trylock(liblockdep_pthread_mutex_t *lock)
 {
-       lock_acquire(&lock->dep_map, 0, 1, 0, 2, NULL, (unsigned long)_RET_IP_);
+       lock_acquire(&lock->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_);
        return pthread_mutex_trylock(&lock->mutex) == 0 ? 1 : 0;
 }
 
index a680ab8c2e3647b77745f1fc698a115822d4068c..4ec03f86155163177d64288b38f164d8651027ca 100644 (file)
@@ -36,7 +36,7 @@ static inline int __rwlock_init(liblockdep_pthread_rwlock_t *lock,
 
 static inline int liblockdep_pthread_rwlock_rdlock(liblockdep_pthread_rwlock_t *lock)
 {
-       lock_acquire(&lock->dep_map, 0, 0, 2, 2, NULL, (unsigned long)_RET_IP_);
+       lock_acquire(&lock->dep_map, 0, 0, 2, 1, NULL, (unsigned long)_RET_IP_);
        return pthread_rwlock_rdlock(&lock->rwlock);
 
 }
@@ -49,19 +49,19 @@ static inline int liblockdep_pthread_rwlock_unlock(liblockdep_pthread_rwlock_t *
 
 static inline int liblockdep_pthread_rwlock_wrlock(liblockdep_pthread_rwlock_t *lock)
 {
-       lock_acquire(&lock->dep_map, 0, 0, 0, 2, NULL, (unsigned long)_RET_IP_);
+       lock_acquire(&lock->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
        return pthread_rwlock_wrlock(&lock->rwlock);
 }
 
 static inline int liblockdep_pthread_rwlock_tryrdlock(liblockdep_pthread_rwlock_t *lock)
 {
-       lock_acquire(&lock->dep_map, 0, 1, 2, 2, NULL, (unsigned long)_RET_IP_);
+       lock_acquire(&lock->dep_map, 0, 1, 2, 1, NULL, (unsigned long)_RET_IP_);
        return pthread_rwlock_tryrdlock(&lock->rwlock) == 0 ? 1 : 0;
 }
 
 static inline int liblockdep_pthread_rwlock_trywlock(liblockdep_pthread_rwlock_t *lock)
 {
-       lock_acquire(&lock->dep_map, 0, 1, 0, 2, NULL, (unsigned long)_RET_IP_);
+       lock_acquire(&lock->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_);
        return pthread_rwlock_trywlock(&lock->rwlock) == 0 ? 1 : 0;
 }
 
index 23bd69cb5ade7014e8630e87ad89a16e2d95be71..6f803609e498246d277d35829b18c7924a136eca 100644 (file)
@@ -92,7 +92,7 @@ enum { none, prepare, done, } __init_state;
 static void init_preload(void);
 static void try_init_preload(void)
 {
-       if (!__init_state != done)
+       if (__init_state != done)
                init_preload();
 }
 
@@ -252,7 +252,7 @@ int pthread_mutex_lock(pthread_mutex_t *mutex)
 
        try_init_preload();
 
-       lock_acquire(&__get_lock(mutex)->dep_map, 0, 0, 0, 2, NULL,
+       lock_acquire(&__get_lock(mutex)->dep_map, 0, 0, 0, 1, NULL,
                        (unsigned long)_RET_IP_);
        /*
         * Here's the thing with pthread mutexes: unlike the kernel variant,
@@ -281,7 +281,7 @@ int pthread_mutex_trylock(pthread_mutex_t *mutex)
 
        try_init_preload();
 
-       lock_acquire(&__get_lock(mutex)->dep_map, 0, 1, 0, 2, NULL, (unsigned long)_RET_IP_);
+       lock_acquire(&__get_lock(mutex)->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_);
        r = ll_pthread_mutex_trylock(mutex);
        if (r)
                lock_release(&__get_lock(mutex)->dep_map, 0, (unsigned long)_RET_IP_);
@@ -303,7 +303,7 @@ int pthread_mutex_unlock(pthread_mutex_t *mutex)
         */
        r = ll_pthread_mutex_unlock(mutex);
        if (r)
-               lock_acquire(&__get_lock(mutex)->dep_map, 0, 0, 0, 2, NULL, (unsigned long)_RET_IP_);
+               lock_acquire(&__get_lock(mutex)->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
 
        return r;
 }
@@ -352,7 +352,7 @@ int pthread_rwlock_rdlock(pthread_rwlock_t *rwlock)
 
         init_preload();
 
-       lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 2, 2, NULL, (unsigned long)_RET_IP_);
+       lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 2, 1, NULL, (unsigned long)_RET_IP_);
        r = ll_pthread_rwlock_rdlock(rwlock);
        if (r)
                lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
@@ -366,7 +366,7 @@ int pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
 
         init_preload();
 
-       lock_acquire(&__get_lock(rwlock)->dep_map, 0, 1, 2, 2, NULL, (unsigned long)_RET_IP_);
+       lock_acquire(&__get_lock(rwlock)->dep_map, 0, 1, 2, 1, NULL, (unsigned long)_RET_IP_);
        r = ll_pthread_rwlock_tryrdlock(rwlock);
        if (r)
                lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
@@ -380,7 +380,7 @@ int pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock)
 
         init_preload();
 
-       lock_acquire(&__get_lock(rwlock)->dep_map, 0, 1, 0, 2, NULL, (unsigned long)_RET_IP_);
+       lock_acquire(&__get_lock(rwlock)->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_);
        r = ll_pthread_rwlock_trywrlock(rwlock);
        if (r)
                 lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
@@ -394,7 +394,7 @@ int pthread_rwlock_wrlock(pthread_rwlock_t *rwlock)
 
         init_preload();
 
-       lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 0, 2, NULL, (unsigned long)_RET_IP_);
+       lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
        r = ll_pthread_rwlock_wrlock(rwlock);
        if (r)
                lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
@@ -411,7 +411,7 @@ int pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
        lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
        r = ll_pthread_rwlock_unlock(rwlock);
        if (r)
-               lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 0, 2, NULL, (unsigned long)_RET_IP_);
+               lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
 
        return r;
 }
@@ -439,8 +439,6 @@ __attribute__((constructor)) static void init_preload(void)
        ll_pthread_rwlock_unlock = dlsym(RTLD_NEXT, "pthread_rwlock_unlock");
 #endif
 
-       printf("%p\n", ll_pthread_mutex_trylock);fflush(stdout);
-
        lockdep_init();
 
        __init_state = done;
index 52c03fbbba1774b7eb8b4a4695dcf90b439a59e9..04a229aa5c0fd0a5eb25b6cbdece82bdb0b9ba38 100644 (file)
@@ -17,6 +17,7 @@
 #include "../util.h"
 #include "../ui.h"
 #include "map.h"
+#include "annotate.h"
 
 struct hist_browser {
        struct ui_browser   b;
@@ -1593,13 +1594,18 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
                                         bi->to.sym->name) > 0)
                                annotate_t = nr_options++;
                } else {
-
                        if (browser->selection != NULL &&
                            browser->selection->sym != NULL &&
-                           !browser->selection->map->dso->annotate_warned &&
-                               asprintf(&options[nr_options], "Annotate %s",
-                                        browser->selection->sym->name) > 0)
-                               annotate = nr_options++;
+                           !browser->selection->map->dso->annotate_warned) {
+                               struct annotation *notes;
+
+                               notes = symbol__annotation(browser->selection->sym);
+
+                               if (notes->src &&
+                                   asprintf(&options[nr_options], "Annotate %s",
+                                                browser->selection->sym->name) > 0)
+                                       annotate = nr_options++;
+                       }
                }
 
                if (thread != NULL &&
@@ -1656,6 +1662,7 @@ retry_popup_menu:
 
                if (choice == annotate || choice == annotate_t || choice == annotate_f) {
                        struct hist_entry *he;
+                       struct annotation *notes;
                        int err;
 do_annotate:
                        if (!objdump_path && perf_session_env__lookup_objdump(env))
@@ -1679,6 +1686,10 @@ do_annotate:
                                he->ms.map = he->branch_info->to.map;
                        }
 
+                       notes = symbol__annotation(he->ms.sym);
+                       if (!notes->src)
+                               continue;
+
                        /*
                         * Don't let this be freed, say, by hists__decay_entry.
                         */
index 0e5fea95d596755b944968668e9d449473c10134..c73e1fc12e53e520f7074050a9e94f128357bdbf 100644 (file)
@@ -496,18 +496,6 @@ struct process_args {
        u64 start;
 };
 
-static int symbol__in_kernel(void *arg, const char *name,
-                            char type __maybe_unused, u64 start)
-{
-       struct process_args *args = arg;
-
-       if (strchr(name, '['))
-               return 0;
-
-       args->start = start;
-       return 1;
-}
-
 static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
                                           size_t bufsz)
 {
@@ -517,27 +505,41 @@ static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
                scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
 }
 
-/* Figure out the start address of kernel map from /proc/kallsyms */
-static u64 machine__get_kernel_start_addr(struct machine *machine)
+const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};
+
+/* Figure out the start address of kernel map from /proc/kallsyms.
+ * Returns the name of the start symbol in *symbol_name. Pass in NULL as
+ * symbol_name if the name is not needed.
+ */
+static u64 machine__get_kernel_start_addr(struct machine *machine,
+                                         const char **symbol_name)
 {
        char filename[PATH_MAX];
-       struct process_args args;
+       int i;
+       const char *name;
+       u64 addr = 0;
 
        machine__get_kallsyms_filename(machine, filename, PATH_MAX);
 
        if (symbol__restricted_filename(filename, "/proc/kallsyms"))
                return 0;
 
-       if (kallsyms__parse(filename, &args, symbol__in_kernel) <= 0)
-               return 0;
+       for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
+               addr = kallsyms__get_function_start(filename, name);
+               if (addr)
+                       break;
+       }
+
+       if (symbol_name)
+               *symbol_name = name;
 
-       return args.start;
+       return addr;
 }
 
 int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
 {
        enum map_type type;
-       u64 start = machine__get_kernel_start_addr(machine);
+       u64 start = machine__get_kernel_start_addr(machine, NULL);
 
        for (type = 0; type < MAP__NR_TYPES; ++type) {
                struct kmap *kmap;
@@ -852,23 +854,11 @@ static int machine__create_modules(struct machine *machine)
        return 0;
 }
 
-const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};
-
 int machine__create_kernel_maps(struct machine *machine)
 {
        struct dso *kernel = machine__get_kernel(machine);
-       char filename[PATH_MAX];
        const char *name;
-       u64 addr = 0;
-       int i;
-
-       machine__get_kallsyms_filename(machine, filename, PATH_MAX);
-
-       for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
-               addr = kallsyms__get_function_start(filename, name);
-               if (addr)
-                       break;
-       }
+       u64 addr = machine__get_kernel_start_addr(machine, &name);
        if (!addr)
                return -1;
 
index e5a3c4be2a10d696c31aebc733234964197affad..3d1537b93c645968410463b75c2721ca4d7c1949 100644 (file)
@@ -108,13 +108,18 @@ DUMP_OBJS = \
        apmain.o\
        osunixdir.o\
        osunixmap.o\
+       osunixxf.o\
        tbprint.o\
        tbxfroot.o\
        utbuffer.o\
+       utdebug.o\
        utexcep.o\
+       utglobal.o\
        utmath.o\
+       utprint.o\
        utstring.o\
        utxferror.o\
+       oslibcfs.o\
        oslinuxtbl.o\
        cmfsize.o\
        getopt.o
index 5140e5edae1fb0d48fd9eb9a0adf6d63f421b0f6..f4b953354ff7e28fa11e86b9b260754ad78c7e99 100644 (file)
@@ -58,44 +58,46 @@ ACPI_MODULE_NAME("cmfsize")
  * RETURN:      File Size. On error, -1 (ACPI_UINT32_MAX)
  *
  * DESCRIPTION: Get the size of a file. Uses seek-to-EOF. File must be open.
- *              Does not disturb the current file pointer. Uses perror for
- *              error messages.
+ *              Does not disturb the current file pointer.
  *
  ******************************************************************************/
-u32 cm_get_file_size(FILE * file)
+u32 cm_get_file_size(ACPI_FILE file)
 {
        long file_size;
        long current_offset;
+       acpi_status status;
 
        /* Save the current file pointer, seek to EOF to obtain file size */
 
-       current_offset = ftell(file);
+       current_offset = acpi_os_get_file_offset(file);
        if (current_offset < 0) {
                goto offset_error;
        }
 
-       if (fseek(file, 0, SEEK_END)) {
+       status = acpi_os_set_file_offset(file, 0, ACPI_FILE_END);
+       if (ACPI_FAILURE(status)) {
                goto seek_error;
        }
 
-       file_size = ftell(file);
+       file_size = acpi_os_get_file_offset(file);
        if (file_size < 0) {
                goto offset_error;
        }
 
        /* Restore original file pointer */
 
-       if (fseek(file, current_offset, SEEK_SET)) {
+       status = acpi_os_set_file_offset(file, current_offset, ACPI_FILE_BEGIN);
+       if (ACPI_FAILURE(status)) {
                goto seek_error;
        }
 
        return ((u32)file_size);
 
 offset_error:
-       perror("Could not get file offset");
+       acpi_log_error("Could not get file offset");
        return (ACPI_UINT32_MAX);
 
 seek_error:
-       perror("Could not seek file");
+       acpi_log_error("Could not set file offset");
        return (ACPI_UINT32_MAX);
 }
index a302f52e4fd354d08aa2502ba181ae2fff566d5d..2f0f34a36db4440660c51366fa4f2bcf2aa6343d 100644 (file)
  *    "f|"      - Option has required single-char sub-options
  */
 
-#include <stdio.h>
-#include <string.h>
 #include <acpi/acpi.h>
 #include "accommon.h"
 #include "acapps.h"
 
 #define ACPI_OPTION_ERROR(msg, badchar) \
-       if (acpi_gbl_opterr) {fprintf (stderr, "%s%c\n", msg, badchar);}
+       if (acpi_gbl_opterr) {acpi_log_error ("%s%c\n", msg, badchar);}
 
 int acpi_gbl_opterr = 1;
 int acpi_gbl_optind = 1;
@@ -113,7 +111,7 @@ int acpi_getopt_argument(int argc, char **argv)
  * PARAMETERS:  argc, argv          - from main
  *              opts                - options info list
  *
- * RETURN:      Option character or EOF
+ * RETURN:      Option character or ACPI_OPT_END
  *
  * DESCRIPTION: Get the next option
  *
@@ -128,10 +126,10 @@ int acpi_getopt(int argc, char **argv, char *opts)
                if (acpi_gbl_optind >= argc ||
                    argv[acpi_gbl_optind][0] != '-' ||
                    argv[acpi_gbl_optind][1] == '\0') {
-                       return (EOF);
-               } else if (strcmp(argv[acpi_gbl_optind], "--") == 0) {
+                       return (ACPI_OPT_END);
+               } else if (ACPI_STRCMP(argv[acpi_gbl_optind], "--") == 0) {
                        acpi_gbl_optind++;
-                       return (EOF);
+                       return (ACPI_OPT_END);
                }
        }
 
@@ -142,7 +140,7 @@ int acpi_getopt(int argc, char **argv, char *opts)
        /* Make sure that the option is legal */
 
        if (current_char == ':' ||
-           (opts_ptr = strchr(opts, current_char)) == NULL) {
+           (opts_ptr = ACPI_STRCHR(opts, current_char)) == NULL) {
                ACPI_OPTION_ERROR("Illegal option: -", current_char);
 
                if (argv[acpi_gbl_optind][++current_char_ptr] == '\0') {
diff --git a/tools/power/acpi/os_specific/service_layers/oslibcfs.c b/tools/power/acpi/os_specific/service_layers/oslibcfs.c
new file mode 100644 (file)
index 0000000..c13ff9c
--- /dev/null
@@ -0,0 +1,214 @@
+/******************************************************************************
+ *
+ * Module Name: oslibcfs - C library OSL for file I/O
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2014, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <stdio.h>
+#include <stdarg.h>
+
+#define _COMPONENT          ACPI_OS_SERVICES
+ACPI_MODULE_NAME("oslibcfs")
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_os_open_file
+ *
+ * PARAMETERS:  path                - File path
+ *              modes               - File operation type
+ *
+ * RETURN:      File descriptor.
+ *
+ * DESCRIPTION: Open a file for reading (ACPI_FILE_READING) and/or writing
+ *              (ACPI_FILE_WRITING).
+ *
+ ******************************************************************************/
+ACPI_FILE acpi_os_open_file(const char *path, u8 modes)
+{
+       ACPI_FILE file;
+       u32 i = 0;
+       char modes_str[4];
+
+       if (modes & ACPI_FILE_READING) {
+               modes_str[i++] = 'r';
+       }
+       if (modes & ACPI_FILE_WRITING) {
+               modes_str[i++] = 'w';
+       }
+       if (modes & ACPI_FILE_BINARY) {
+               modes_str[i++] = 'b';
+       }
+
+       modes_str[i++] = '\0';
+
+       file = fopen(path, modes_str);
+       if (!file) {
+               perror("Could not open file");
+       }
+
+       return (file);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_os_close_file
+ *
+ * PARAMETERS:  file                - An open file descriptor
+ *
+ * RETURN:      None.
+ *
+ * DESCRIPTION: Close a file opened via acpi_os_open_file.
+ *
+ ******************************************************************************/
+
+void acpi_os_close_file(ACPI_FILE file)
+{
+       fclose(file);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_os_read_file
+ *
+ * PARAMETERS:  file                - An open file descriptor
+ *              buffer              - Data buffer
+ *              size                - Data block size
+ *              count               - Number of data blocks
+ *
+ * RETURN:      Number of bytes actually read.
+ *
+ * DESCRIPTION: Read from a file.
+ *
+ ******************************************************************************/
+
+int
+acpi_os_read_file(ACPI_FILE file, void *buffer, acpi_size size, acpi_size count)
+{
+       int length;
+
+       length = fread(buffer, size, count, file);
+       if (length < 0) {
+               perror("Error reading file");
+       }
+
+       return (length);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_os_write_file
+ *
+ * PARAMETERS:  file                - An open file descriptor
+ *              buffer              - Data buffer
+ *              size                - Data block size
+ *              count               - Number of data blocks
+ *
+ * RETURN:      Number of bytes actually written.
+ *
+ * DESCRIPTION: Write to a file.
+ *
+ ******************************************************************************/
+
+int
+acpi_os_write_file(ACPI_FILE file,
+                  void *buffer, acpi_size size, acpi_size count)
+{
+       int length;
+
+       length = fwrite(buffer, size, count, file);
+       if (length < 0) {
+               perror("Error writing file");
+       }
+
+       return (length);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_os_get_file_offset
+ *
+ * PARAMETERS:  file                - An open file descriptor
+ *
+ * RETURN:      Current file pointer position.
+ *
+ * DESCRIPTION: Get current file offset.
+ *
+ ******************************************************************************/
+
+long acpi_os_get_file_offset(ACPI_FILE file)
+{
+       long offset;
+
+       offset = ftell(file);
+       return (offset);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_os_set_file_offset
+ *
+ * PARAMETERS:  file                - An open file descriptor
+ *              offset              - New file offset
+ *              from                - From begin/end of file
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Set current file offset.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_os_set_file_offset(ACPI_FILE file, long offset, u8 from)
+{
+       int ret = 0;
+
+       if (from == ACPI_FILE_BEGIN) {
+               ret = fseek(file, offset, SEEK_SET);
+       }
+       if (from == ACPI_FILE_END) {
+               ret = fseek(file, offset, SEEK_END);
+       }
+
+       if (ret < 0) {
+               return (AE_ERROR);
+       } else {
+               return (AE_OK);
+       }
+}
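
A minimal usage sketch of the OSL file interface added above, paired with the cm_get_file_size() conversion earlier in this series; the helper name and error handling are illustrative only:

#include <acpi/acpi.h>

/* Illustrative helper: read an entire file through the portable OSL calls */
static acpi_status read_whole_file(const char *path, void *buffer, u32 buffer_size)
{
        ACPI_FILE file;
        u32 size;

        file = acpi_os_open_file(path, ACPI_FILE_READING | ACPI_FILE_BINARY);
        if (!file) {
                return (AE_ERROR);
        }

        size = cm_get_file_size(file);  /* built on the get/set offset OSL calls */
        if (size == ACPI_UINT32_MAX || size > buffer_size) {
                acpi_os_close_file(file);
                return (AE_ERROR);
        }

        acpi_os_read_file(file, buffer, 1, size);
        acpi_os_close_file(file);
        return (AE_OK);
}
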
index 28c52008e854ad8fe8b58372867f654cc5df2ea5..0dc2485dedf5e5f68c2c409ef7ee5fa6facc8d48 100644 (file)
@@ -77,6 +77,9 @@ osl_map_table(acpi_size address,
 
 static void osl_unmap_table(struct acpi_table_header *table);
 
+static acpi_physical_address
+osl_find_rsdp_via_efi_by_keyword(FILE * file, const char *keyword);
+
 static acpi_physical_address osl_find_rsdp_via_efi(void);
 
 static acpi_status osl_load_rsdp(void);
@@ -415,6 +418,38 @@ acpi_os_get_table_by_index(u32 index,
        return (status);
 }
 
+/******************************************************************************
+ *
+ * FUNCTION:    osl_find_rsdp_via_efi_by_keyword
+ *
+ * PARAMETERS:  keyword         - Character string indicating ACPI GUID version
+ *                                in the EFI table
+ *
+ * RETURN:      RSDP address if found
+ *
+ * DESCRIPTION: Find RSDP address via EFI using keyword indicating the ACPI
+ *              GUID version.
+ *
+ *****************************************************************************/
+
+static acpi_physical_address
+osl_find_rsdp_via_efi_by_keyword(FILE * file, const char *keyword)
+{
+       char buffer[80];
+       unsigned long long address = 0;
+       char format[32];
+
+       snprintf(format, 32, "%s=%s", keyword, "%llx");
+       fseek(file, 0, SEEK_SET);
+       while (fgets(buffer, 80, file)) {
+               if (sscanf(buffer, format, &address) == 1) {
+                       break;
+               }
+       }
+
+       return ((acpi_physical_address) (address));
+}
+
 /******************************************************************************
  *
  * FUNCTION:    osl_find_rsdp_via_efi
@@ -430,20 +465,19 @@ acpi_os_get_table_by_index(u32 index,
 static acpi_physical_address osl_find_rsdp_via_efi(void)
 {
        FILE *file;
-       char buffer[80];
-       unsigned long address = 0;
+       acpi_physical_address address = 0;
 
        file = fopen(EFI_SYSTAB, "r");
        if (file) {
-               while (fgets(buffer, 80, file)) {
-                       if (sscanf(buffer, "ACPI20=0x%lx", &address) == 1) {
-                               break;
-                       }
+               address = osl_find_rsdp_via_efi_by_keyword(file, "ACPI20");
+               if (!address) {
+                       address =
+                           osl_find_rsdp_via_efi_by_keyword(file, "ACPI");
                }
                fclose(file);
        }
 
-       return ((acpi_physical_address) (address));
+       return (address);
 }
 
 /******************************************************************************
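
For context, EFI_SYSTAB here is the sysfs EFI system table file (typically /sys/firmware/efi/systab), which consists of key=value lines. The helper above builds a scan format such as "ACPI20=%llx", returns the first matching address, and the caller falls back to the ACPI 1.0 "ACPI" keyword when no ACPI 2.0 entry exists. Illustrative contents only (addresses made up):

        ACPI20=0x7b7e6014
        ACPI=0x7b7e6000
        SMBIOS=0xf0530
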
diff --git a/tools/power/acpi/os_specific/service_layers/osunixxf.c b/tools/power/acpi/os_specific/service_layers/osunixxf.c
new file mode 100644 (file)
index 0000000..60b58cd
--- /dev/null
@@ -0,0 +1,1311 @@
+/******************************************************************************
+ *
+ * Module Name: osunixxf - UNIX OSL interfaces
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2014, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+/*
+ * These interfaces are required in order to compile the ASL compiler and the
+ * various ACPICA tools under Linux or other Unix-like system.
+ */
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "amlcode.h"
+#include "acparser.h"
+#include "acdebug.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <sys/time.h>
+#include <semaphore.h>
+#include <pthread.h>
+#include <errno.h>
+
+#define _COMPONENT          ACPI_OS_SERVICES
+ACPI_MODULE_NAME("osunixxf")
+
+u8 acpi_gbl_debug_timeout = FALSE;
+
+/* Upcalls to acpi_exec */
+
+void
+ae_table_override(struct acpi_table_header *existing_table,
+                 struct acpi_table_header **new_table);
+
+typedef void *(*PTHREAD_CALLBACK) (void *);
+
+/* Buffer used by acpi_os_vprintf */
+
+#define ACPI_VPRINTF_BUFFER_SIZE    512
+#define _ASCII_NEWLINE              '\n'
+
+/* Terminal support for acpi_exec only */
+
+#ifdef ACPI_EXEC_APP
+#include <termios.h>
+
+struct termios original_term_attributes;
+int term_attributes_were_set = 0;
+
+acpi_status acpi_ut_read_line(char *buffer, u32 buffer_length, u32 *bytes_read);
+
+static void os_enter_line_edit_mode(void);
+
+static void os_exit_line_edit_mode(void);
+
+/******************************************************************************
+ *
+ * FUNCTION:    os_enter_line_edit_mode, os_exit_line_edit_mode
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Enter/Exit the raw character input mode for the terminal.
+ *
+ * Interactive line-editing support for the AML debugger. Used with the
+ * common/acgetline module.
+ *
+ * readline() is not used because of non-portability. It is not available
+ * on all systems, and if it is, often the package must be manually installed.
+ *
+ * Therefore, we use the POSIX tcgetattr/tcsetattr and do the minimal line
+ * editing that we need in acpi_os_get_line.
+ *
+ * If the POSIX tcgetattr/tcsetattr interfaces are unavailable, these
+ * calls will also work:
+ *     For os_enter_line_edit_mode: system ("stty cbreak -echo")
+ *     For os_exit_line_edit_mode: system ("stty cooked echo")
+ *
+ *****************************************************************************/
+
+static void os_enter_line_edit_mode(void)
+{
+       struct termios local_term_attributes;
+
+       /* Get and keep the original attributes */
+
+       if (tcgetattr(STDIN_FILENO, &original_term_attributes)) {
+               fprintf(stderr, "Could not get terminal attributes!\n");
+               return;
+       }
+
+       /* Set the new attributes to enable raw character input */
+
+       memcpy(&local_term_attributes, &original_term_attributes,
+              sizeof(struct termios));
+
+       local_term_attributes.c_lflag &= ~(ICANON | ECHO);
+       local_term_attributes.c_cc[VMIN] = 1;
+       local_term_attributes.c_cc[VTIME] = 0;
+
+       if (tcsetattr(STDIN_FILENO, TCSANOW, &local_term_attributes)) {
+               fprintf(stderr, "Could not set terminal attributes!\n");
+               return;
+       }
+
+       term_attributes_were_set = 1;
+}
+
+static void os_exit_line_edit_mode(void)
+{
+
+       if (!term_attributes_were_set) {
+               return;
+       }
+
+       /* Set terminal attributes back to the original values */
+
+       if (tcsetattr(STDIN_FILENO, TCSANOW, &original_term_attributes)) {
+               fprintf(stderr, "Could not restore terminal attributes!\n");
+       }
+}
+
+#else
+
+/* These functions are not needed for other ACPICA utilities */
+
+#define os_enter_line_edit_mode()
+#define os_exit_line_edit_mode()
+#endif
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_os_initialize, acpi_os_terminate
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Initialize and terminate this module.
+ *
+ *****************************************************************************/
+
+acpi_status acpi_os_initialize(void)
+{
+       acpi_status status;
+
+       acpi_gbl_output_file = stdout;
+
+       os_enter_line_edit_mode();
+
+       status = acpi_os_create_lock(&acpi_gbl_print_lock);
+       if (ACPI_FAILURE(status)) {
+               return (status);
+       }
+
+       return (AE_OK);
+}
+
+acpi_status acpi_os_terminate(void)
+{
+
+       os_exit_line_edit_mode();
+       return (AE_OK);
+}
+
+#ifndef ACPI_USE_NATIVE_RSDP_POINTER
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_os_get_root_pointer
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      RSDP physical address
+ *
+ * DESCRIPTION: Gets the ACPI root pointer (RSDP)
+ *
+ *****************************************************************************/
+
+acpi_physical_address acpi_os_get_root_pointer(void)
+{
+
+       return (0);
+}
+#endif
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_os_predefined_override
+ *
+ * PARAMETERS:  init_val            - Initial value of the predefined object
+ *              new_val             - The new value for the object
+ *
+ * RETURN:      Status, pointer to value. Null pointer returned if not
+ *              overriding.
+ *
+ * DESCRIPTION: Allow the OS to override predefined names
+ *
+ *****************************************************************************/
+
+acpi_status
+acpi_os_predefined_override(const struct acpi_predefined_names * init_val,
+                           acpi_string * new_val)
+{
+
+       if (!init_val || !new_val) {
+               return (AE_BAD_PARAMETER);
+       }
+
+       *new_val = NULL;
+       return (AE_OK);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_os_table_override
+ *
+ * PARAMETERS:  existing_table      - Header of current table (probably
+ *                                    firmware)
+ *              new_table           - Where an entire new table is returned.
+ *
+ * RETURN:      Status, pointer to new table. Null pointer returned if no
+ *              table is available to override
+ *
+ * DESCRIPTION: Return a different version of a table if one is available
+ *
+ *****************************************************************************/
+
+acpi_status
+acpi_os_table_override(struct acpi_table_header * existing_table,
+                      struct acpi_table_header ** new_table)
+{
+
+       if (!existing_table || !new_table) {
+               return (AE_BAD_PARAMETER);
+       }
+
+       *new_table = NULL;
+
+#ifdef ACPI_EXEC_APP
+
+       ae_table_override(existing_table, new_table);
+       return (AE_OK);
+#else
+
+       return (AE_NO_ACPI_TABLES);
+#endif
+}
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_os_physical_table_override
+ *
+ * PARAMETERS:  existing_table      - Header of current table (probably firmware)
+ *              new_address         - Where new table address is returned
+ *                                    (Physical address)
+ *              new_table_length    - Where new table length is returned
+ *
+ * RETURN:      Status, address/length of new table. Null pointer returned
+ *              if no table is available to override.
+ *
+ * DESCRIPTION: Returns AE_SUPPORT, function not used in user space.
+ *
+ *****************************************************************************/
+
+acpi_status
+acpi_os_physical_table_override(struct acpi_table_header * existing_table,
+                               acpi_physical_address * new_address,
+                               u32 *new_table_length)
+{
+
+       return (AE_SUPPORT);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_os_redirect_output
+ *
+ * PARAMETERS:  destination         - An open file handle/pointer
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Causes redirect of acpi_os_printf and acpi_os_vprintf
+ *
+ *****************************************************************************/
+
+void acpi_os_redirect_output(void *destination)
+{
+
+       acpi_gbl_output_file = destination;
+}
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_os_printf
+ *
+ * PARAMETERS:  fmt, ...            - Standard printf format
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Formatted output. Note: very similar to acpi_os_vprintf
+ *              (performance), changes should be tracked in both functions.
+ *
+ *****************************************************************************/
+
+void ACPI_INTERNAL_VAR_XFACE acpi_os_printf(const char *fmt, ...)
+{
+       va_list args;
+       u8 flags;
+
+       flags = acpi_gbl_db_output_flags;
+       if (flags & ACPI_DB_REDIRECTABLE_OUTPUT) {
+
+               /* Output is directable to either a file (if open) or the console */
+
+               if (acpi_gbl_debug_file) {
+
+                       /* Output file is open, send the output there */
+
+                       va_start(args, fmt);
+                       vfprintf(acpi_gbl_debug_file, fmt, args);
+                       va_end(args);
+               } else {
+                       /* No redirection, send output to console (once only!) */
+
+                       flags |= ACPI_DB_CONSOLE_OUTPUT;
+               }
+       }
+
+       if (flags & ACPI_DB_CONSOLE_OUTPUT) {
+               va_start(args, fmt);
+               vfprintf(acpi_gbl_output_file, fmt, args);
+               va_end(args);
+       }
+}
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_os_vprintf
+ *
+ * PARAMETERS:  fmt                 - Standard printf format
+ *              args                - Argument list
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Formatted output with argument list pointer. Note: very
+ *              similar to acpi_os_printf, changes should be tracked in both
+ *              functions.
+ *
+ *****************************************************************************/
+
+void acpi_os_vprintf(const char *fmt, va_list args)
+{
+       u8 flags;
+       char buffer[ACPI_VPRINTF_BUFFER_SIZE];
+
+       /*
+        * We build the output string in a local buffer because we may be
+        * outputting the buffer twice. Using vfprintf is problematic because
+        * some implementations modify the args pointer/structure during
+        * execution. Thus, we use the local buffer for portability.
+        *
+        * Note: Since this module is intended for use by the various ACPICA
+        * utilities/applications, we can safely declare the buffer on the stack.
+        * Also, this function is used for relatively small error messages only.
+        */
+       vsnprintf(buffer, ACPI_VPRINTF_BUFFER_SIZE, fmt, args);
+
+       flags = acpi_gbl_db_output_flags;
+       if (flags & ACPI_DB_REDIRECTABLE_OUTPUT) {
+
+               /* Output is directable to either a file (if open) or the console */
+
+               if (acpi_gbl_debug_file) {
+
+                       /* Output file is open, send the output there */
+
+                       fputs(buffer, acpi_gbl_debug_file);
+               } else {
+                       /* No redirection, send output to console (once only!) */
+
+                       flags |= ACPI_DB_CONSOLE_OUTPUT;
+               }
+       }
+
+       if (flags & ACPI_DB_CONSOLE_OUTPUT) {
+               fputs(buffer, acpi_gbl_output_file);
+       }
+}
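+
+/*
+ * Illustrative usage sketch (not part of the original source): redirect
+ * output to an already-open file handle and print through the normal
+ * interface. The example_log_to_file name is hypothetical.
+ */
+static void example_log_to_file(FILE *file)
+{
+       acpi_os_redirect_output(file);
+       acpi_os_printf("Output is now redirected to the given file\n");
+}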
+
+#ifndef ACPI_EXEC_APP
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_os_get_line
+ *
+ * PARAMETERS:  buffer              - Where to return the command line
+ *              buffer_length       - Maximum length of Buffer
+ *              bytes_read          - Where the actual byte count is returned
+ *
+ * RETURN:      Status and actual bytes read
+ *
+ * DESCRIPTION: Get the next input line from the terminal. NOTE: For the
+ *              acpi_exec utility, we use the acgetline module instead to
+ *              provide line-editing and history support.
+ *
+ *****************************************************************************/
+
+acpi_status acpi_os_get_line(char *buffer, u32 buffer_length, u32 *bytes_read)
+{
+       int input_char;
+       u32 end_of_line;
+
+       /* Standard acpi_os_get_line for all utilities except acpi_exec */
+
+       for (end_of_line = 0;; end_of_line++) {
+               if (end_of_line >= buffer_length) {
+                       return (AE_BUFFER_OVERFLOW);
+               }
+
+               if ((input_char = getchar()) == EOF) {
+                       return (AE_ERROR);
+               }
+
+               if (!input_char || input_char == _ASCII_NEWLINE) {
+                       break;
+               }
+
+               buffer[end_of_line] = (char)input_char;
+       }
+
+       /* Null terminate the buffer */
+
+       buffer[end_of_line] = 0;
+
+       /* Return the number of bytes in the string */
+
+       if (bytes_read) {
+               *bytes_read = end_of_line;
+       }
+
+       return (AE_OK);
+}
+#endif
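+
+/*
+ * Illustrative usage sketch (not part of the original source): read one
+ * line from the terminal into a fixed-size buffer and echo it back. The
+ * example_read_command name and the buffer size are hypothetical.
+ */
+static acpi_status example_read_command(void)
+{
+       char line[128];
+       u32 bytes_read;
+       acpi_status status;
+
+       status = acpi_os_get_line(line, sizeof(line), &bytes_read);
+       if (ACPI_SUCCESS(status)) {
+               acpi_os_printf("Read %u bytes: %s\n", bytes_read, line);
+       }
+
+       return (status);
+}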
+
+#ifndef ACPI_USE_NATIVE_MEMORY_MAPPING
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_os_map_memory
+ *
+ * PARAMETERS:  where               - Physical address of memory to be mapped
+ *              length              - How much memory to map
+ *
+ * RETURN:      Pointer to mapped memory. Null on error.
+ *
+ * DESCRIPTION: Map physical memory into caller's address space
+ *
+ *****************************************************************************/
+
+void *acpi_os_map_memory(acpi_physical_address where, acpi_size length)
+{
+
+       return (ACPI_TO_POINTER((acpi_size) where));
+}
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_os_unmap_memory
+ *
+ * PARAMETERS:  where               - Logical address of memory to be unmapped
+ *              length              - How much memory to unmap
+ *
+ * RETURN:      None.
+ *
+ * DESCRIPTION: Delete a previously created mapping. Where and Length must
+ *              correspond to a previous mapping exactly.
+ *
+ *****************************************************************************/
+
+void acpi_os_unmap_memory(void *where, acpi_size length)
+{
+
+       return;
+}
+#endif
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_os_allocate
+ *
+ * PARAMETERS:  size                - Amount to allocate, in bytes
+ *
+ * RETURN:      Pointer to the new allocation. Null on error.
+ *
+ * DESCRIPTION: Allocate memory. Algorithm is dependent on the OS.
+ *
+ *****************************************************************************/
+
+void *acpi_os_allocate(acpi_size size)
+{
+       void *mem;
+
+       mem = (void *)malloc((size_t) size);
+       return (mem);
+}
+
+#ifdef USE_NATIVE_ALLOCATE_ZEROED
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_os_allocate_zeroed
+ *
+ * PARAMETERS:  size                - Amount to allocate, in bytes
+ *
+ * RETURN:      Pointer to the new allocation. Null on error.
+ *
+ * DESCRIPTION: Allocate and zero memory. Algorithm is dependent on the OS.
+ *
+ *****************************************************************************/
+
+void *acpi_os_allocate_zeroed(acpi_size size)
+{
+       void *mem;
+
+       mem = (void *)calloc(1, (size_t) size);
+       return (mem);
+}
+#endif
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_os_free
+ *
+ * PARAMETERS:  mem                 - Pointer to previously allocated memory
+ *
+ * RETURN:      None.
+ *
+ * DESCRIPTION: Free memory allocated via acpi_os_allocate
+ *
+ *****************************************************************************/
+
+void acpi_os_free(void *mem)
+{
+
+       free(mem);
+}
+
+#ifdef ACPI_SINGLE_THREADED
+/******************************************************************************
+ *
+ * FUNCTION:    Semaphore stub functions
+ *
+ * DESCRIPTION: Stub functions used for single-thread applications that do
+ *              not require semaphore synchronization. Full implementations
+ *              of these functions appear after the stubs.
+ *
+ *****************************************************************************/
+
+acpi_status
+acpi_os_create_semaphore(u32 max_units,
+                        u32 initial_units, acpi_handle * out_handle)
+{
+       *out_handle = (acpi_handle) 1;
+       return (AE_OK);
+}
+
+acpi_status acpi_os_delete_semaphore(acpi_handle handle)
+{
+       return (AE_OK);
+}
+
+acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
+{
+       return (AE_OK);
+}
+
+acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
+{
+       return (AE_OK);
+}
+
+#else
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_os_create_semaphore
+ *
+ * PARAMETERS:  max_units           - Maximum units that can be sent
+ *              initial_units       - Units to be assigned to the new semaphore
+ *              out_handle          - Where a handle will be returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Create an OS semaphore
+ *
+ *****************************************************************************/
+
+acpi_status
+acpi_os_create_semaphore(u32 max_units,
+                        u32 initial_units, acpi_handle * out_handle)
+{
+       sem_t *sem;
+
+       if (!out_handle) {
+               return (AE_BAD_PARAMETER);
+       }
+#ifdef __APPLE__
+       {
+               char *semaphore_name = tmpnam(NULL);
+
+               sem =
+                   sem_open(semaphore_name, O_EXCL | O_CREAT, 0755,
+                            initial_units);
+               if (!sem) {
+                       return (AE_NO_MEMORY);
+               }
+               sem_unlink(semaphore_name);     /* This just deletes the name */
+       }
+
+#else
+       sem = acpi_os_allocate(sizeof(sem_t));
+       if (!sem) {
+               return (AE_NO_MEMORY);
+       }
+
+       if (sem_init(sem, 0, initial_units) == -1) {
+               acpi_os_free(sem);
+               return (AE_BAD_PARAMETER);
+       }
+#endif
+
+       *out_handle = (acpi_handle) sem;
+       return (AE_OK);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_os_delete_semaphore
+ *
+ * PARAMETERS:  handle              - Handle returned by acpi_os_create_semaphore
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Delete an OS semaphore
+ *
+ *****************************************************************************/
+
+acpi_status acpi_os_delete_semaphore(acpi_handle handle)
+{
+       sem_t *sem = (sem_t *) handle;
+
+       if (!sem) {
+               return (AE_BAD_PARAMETER);
+       }
+
+       if (sem_destroy(sem) == -1) {
+               return (AE_BAD_PARAMETER);
+       }
+
+       return (AE_OK);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_os_wait_semaphore
+ *
+ * PARAMETERS:  handle              - Handle returned by acpi_os_create_semaphore
+ *              units               - How many units to wait for
+ *              msec_timeout        - How long to wait (milliseconds)
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Wait for units
+ *
+ *****************************************************************************/
+
+acpi_status
+acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 msec_timeout)
+{
+       acpi_status status = AE_OK;
+       sem_t *sem = (sem_t *) handle;
+#ifndef ACPI_USE_ALTERNATE_TIMEOUT
+       struct timespec time;
+       int ret_val;
+#endif
+
+       if (!sem) {
+               return (AE_BAD_PARAMETER);
+       }
+
+       switch (msec_timeout) {
+               /*
+                * No Wait:
+                * --------
+                * A zero timeout value indicates that we shouldn't wait - just
+                * acquire the semaphore if available, otherwise return AE_TIME
+                * (a.k.a. 'would block').
+                */
+       case 0:
+
+               if (sem_trywait(sem) == -1) {
+                       status = (AE_TIME);
+               }
+               break;
+
+               /* Wait Indefinitely */
+
+       case ACPI_WAIT_FOREVER:
+
+               if (sem_wait(sem)) {
+                       status = (AE_TIME);
+               }
+               break;
+
+               /* Wait with msec_timeout */
+
+       default:
+
+#ifdef ACPI_USE_ALTERNATE_TIMEOUT
+               /*
+                * Alternate timeout mechanism for environments where
+                * sem_timedwait is not available or does not work properly.
+                */
+               while (msec_timeout) {
+                       if (sem_trywait(sem) == 0) {
+
+                               /* Got the semaphore */
+                               return (AE_OK);
+                       }
+
+                       if (msec_timeout >= 10) {
+                               msec_timeout -= 10;
+                               usleep(10 * ACPI_USEC_PER_MSEC);        /* ten milliseconds */
+                       } else {
+                               msec_timeout--;
+                               usleep(ACPI_USEC_PER_MSEC);     /* one millisecond */
+                       }
+               }
+               status = (AE_TIME);
+#else
+               /*
+                * The interface to sem_timedwait is an absolute time, so we need to
+                * get the current time, then add in the millisecond Timeout value.
+                */
+               if (clock_gettime(CLOCK_REALTIME, &time) == -1) {
+                       perror("clock_gettime");
+                       return (AE_TIME);
+               }
+
+               time.tv_sec += (msec_timeout / ACPI_MSEC_PER_SEC);
+               time.tv_nsec +=
+                   ((msec_timeout % ACPI_MSEC_PER_SEC) * ACPI_NSEC_PER_MSEC);
+
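+               /*
+                * Illustrative example (annotation, not in the original
+                * source): a 2500 ms timeout adds 2 whole seconds and
+                * 500 * 10^6 nanoseconds above; the check below then
+                * renormalizes tv_nsec if the addition reached one second
+                * or more.
+                */
+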
+               /* Handle nanosecond overflow (field must be less than one second) */
+
+               if (time.tv_nsec >= ACPI_NSEC_PER_SEC) {
+                       time.tv_sec += (time.tv_nsec / ACPI_NSEC_PER_SEC);
+                       time.tv_nsec = (time.tv_nsec % ACPI_NSEC_PER_SEC);
+               }
+
+               while (((ret_val = sem_timedwait(sem, &time)) == -1)
+                      && (errno == EINTR)) {
+                       continue;
+               }
+
+               if (ret_val != 0) {
+                       if (errno != ETIMEDOUT) {
+                               perror("sem_timedwait");
+                       }
+                       status = (AE_TIME);
+               }
+#endif
+               break;
+       }
+
+       return (status);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_os_signal_semaphore
+ *
+ * PARAMETERS:  handle              - Handle returned by acpi_os_create_semaphore
+ *              units               - Number of units to send
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Send units
+ *
+ *****************************************************************************/
+
+acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
+{
+       sem_t *sem = (sem_t *) handle;
+
+       if (!sem) {
+               return (AE_BAD_PARAMETER);
+       }
+
+       if (sem_post(sem) == -1) {
+               return (AE_LIMIT);
+       }
+
+       return (AE_OK);
+}
+
+#endif                         /* ACPI_SINGLE_THREADED */
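+
+/*
+ * Illustrative usage sketch (not part of the original source): create a
+ * binary semaphore, wait on it with a 100 ms timeout, signal it, and
+ * delete it. The example_semaphore_round_trip name is hypothetical.
+ */
+static acpi_status example_semaphore_round_trip(void)
+{
+       acpi_handle sem;
+       acpi_status status;
+
+       status = acpi_os_create_semaphore(1, 1, &sem);
+       if (ACPI_FAILURE(status)) {
+               return (status);
+       }
+
+       status = acpi_os_wait_semaphore(sem, 1, 100);
+       if (ACPI_SUCCESS(status)) {
+               (void)acpi_os_signal_semaphore(sem, 1);
+       }
+
+       (void)acpi_os_delete_semaphore(sem);
+       return (status);
+}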
+
+/******************************************************************************
+ *
+ * FUNCTION:    Spinlock interfaces
+ *
+ * DESCRIPTION: Map these interfaces to semaphore interfaces
+ *
+ *****************************************************************************/
+
+acpi_status acpi_os_create_lock(acpi_spinlock * out_handle)
+{
+
+       return (acpi_os_create_semaphore(1, 1, out_handle));
+}
+
+void acpi_os_delete_lock(acpi_spinlock handle)
+{
+       acpi_os_delete_semaphore(handle);
+}
+
+acpi_cpu_flags acpi_os_acquire_lock(acpi_handle handle)
+{
+       acpi_os_wait_semaphore(handle, 1, 0xFFFF);
+       return (0);
+}
+
+void acpi_os_release_lock(acpi_spinlock handle, acpi_cpu_flags flags)
+{
+       acpi_os_signal_semaphore(handle, 1);
+}
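+
+/*
+ * Illustrative usage sketch (not part of the original source): the lock
+ * interfaces above follow the usual acquire/release pairing. The
+ * example_locked_increment name is hypothetical.
+ */
+static void example_locked_increment(acpi_spinlock lock, u32 *counter)
+{
+       acpi_cpu_flags flags;
+
+       flags = acpi_os_acquire_lock(lock);
+       (*counter)++;
+       acpi_os_release_lock(lock, flags);
+}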
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_os_install_interrupt_handler
+ *
+ * PARAMETERS:  interrupt_number    - Interrupt level the handler responds to
+ *              service_routine     - Address of the ACPI interrupt handler
+ *              context             - Passed to the handler when it is invoked
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Install an interrupt handler. Used to install the ACPI
+ *              OS-independent handler.
+ *
+ *****************************************************************************/
+
+u32
+acpi_os_install_interrupt_handler(u32 interrupt_number,
+                                 acpi_osd_handler service_routine,
+                                 void *context)
+{
+
+       return (AE_OK);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_os_remove_interrupt_handler
+ *
+ * PARAMETERS:  interrupt_number    - Interrupt level of the installed handler
+ *              service_routine     - Address of the previously installed handler
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Uninstalls an interrupt handler.
+ *
+ *****************************************************************************/
+
+acpi_status
+acpi_os_remove_interrupt_handler(u32 interrupt_number,
+                                acpi_osd_handler service_routine)
+{
+
+       return (AE_OK);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_os_stall
+ *
+ * PARAMETERS:  microseconds        - Time to sleep
+ *
+ * RETURN:      None. Blocks until the delay has elapsed.
+ *
+ * DESCRIPTION: Sleep at microsecond granularity
+ *
+ *****************************************************************************/
+
+void acpi_os_stall(u32 microseconds)
+{
+
+       if (microseconds) {
+               usleep(microseconds);
+       }
+}
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_os_sleep
+ *
+ * PARAMETERS:  milliseconds        - Time to sleep
+ *
+ * RETURN:      None. Blocks until the sleep is completed.
+ *
+ * DESCRIPTION: Sleep at millisecond granularity
+ *
+ *****************************************************************************/
+
+void acpi_os_sleep(u64 milliseconds)
+{
+
+       /* Sleep for whole seconds */
+
+       sleep(milliseconds / ACPI_MSEC_PER_SEC);
+
+       /*
+        * Sleep for remaining microseconds.
+        * Arg to usleep() is in usecs and must be less than 1,000,000 (1 second).
+        */
+       usleep((milliseconds % ACPI_MSEC_PER_SEC) * ACPI_USEC_PER_MSEC);
+}
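+
+/*
+ * Illustrative example (annotation, not in the original source): a request
+ * to sleep for 2345 ms becomes sleep(2) for the whole seconds followed by
+ * usleep(345 * ACPI_USEC_PER_MSEC) for the remaining 345 ms.
+ */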
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_os_get_timer
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Current time in 100 nanosecond units
+ *
+ * DESCRIPTION: Get the current system time
+ *
+ *****************************************************************************/
+
+u64 acpi_os_get_timer(void)
+{
+       struct timeval time;
+
+       /* This timer has sufficient resolution for user-space application code */
+
+       gettimeofday(&time, NULL);
+
+       /* Seconds -> 100ns units (* 10^7), plus microseconds -> 100ns units (* 10) */
+
+       return (((u64)time.tv_sec * ACPI_100NSEC_PER_SEC) +
+               ((u64)time.tv_usec * ACPI_100NSEC_PER_USEC));
+}
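+
+/*
+ * Illustrative example (annotation, not in the original source): for
+ * tv_sec = 1 and tv_usec = 500000 the result is 10,000,000 + 5,000,000 =
+ * 15,000,000 hundred-nanosecond units, i.e. 1.5 seconds.
+ */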
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_os_read_pci_configuration
+ *
+ * PARAMETERS:  pci_id              - Seg/Bus/Dev
+ *              pci_register        - Device Register
+ *              value               - Buffer where value is placed
+ *              width               - Number of bits
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Read data from PCI configuration space
+ *
+ *****************************************************************************/
+
+acpi_status
+acpi_os_read_pci_configuration(struct acpi_pci_id *pci_id,
+                              u32 pci_register, u64 *value, u32 width)
+{
+
+       *value = 0;
+       return (AE_OK);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_os_write_pci_configuration
+ *
+ * PARAMETERS:  pci_id              - Seg/Bus/Dev
+ *              pci_register        - Device Register
+ *              value               - Value to be written
+ *              width               - Number of bits
+ *
+ * RETURN:      Status.
+ *
+ * DESCRIPTION: Write data to PCI configuration space
+ *
+ *****************************************************************************/
+
+acpi_status
+acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id,
+                               u32 pci_register, u64 value, u32 width)
+{
+
+       return (AE_OK);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_os_read_port
+ *
+ * PARAMETERS:  address             - Address of I/O port/register to read
+ *              value               - Where value is placed
+ *              width               - Number of bits
+ *
+ * RETURN:      Status and the value read from the port
+ *
+ * DESCRIPTION: Read data from an I/O port or register
+ *
+ *****************************************************************************/
+
+acpi_status acpi_os_read_port(acpi_io_address address, u32 *value, u32 width)
+{
+
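+       /*
+        * This stub does not access hardware; it simply returns all ones
+        * for the requested width (annotation added for clarity, not in
+        * the original source).
+        */
+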
+       switch (width) {
+       case 8:
+
+               *value = 0xFF;
+               break;
+
+       case 16:
+
+               *value = 0xFFFF;
+               break;
+
+       case 32:
+
+               *value = 0xFFFFFFFF;
+               break;
+
+       default:
+
+               return (AE_BAD_PARAMETER);
+       }
+
+       return (AE_OK);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_os_write_port
+ *
+ * PARAMETERS:  address             - Address of I/O port/register to write
+ *              value               - Value to write
+ *              width               - Number of bits
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Write data to an I/O port or register
+ *
+ *****************************************************************************/
+
+acpi_status acpi_os_write_port(acpi_io_address address, u32 value, u32 width)
+{
+
+       return (AE_OK);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_os_read_memory
+ *
+ * PARAMETERS:  address             - Physical Memory Address to read
+ *              value               - Where value is placed
+ *              width               - Number of bits (8,16,32, or 64)
+ *
+ * RETURN:      Status. The value read is returned through the value
+ *              parameter, always as a 64-bit integer regardless of the
+ *              read width.
+ *
+ * DESCRIPTION: Read data from a physical memory address
+ *
+ *****************************************************************************/
+
+acpi_status
+acpi_os_read_memory(acpi_physical_address address, u64 *value, u32 width)
+{
+
+       switch (width) {
+       case 8:
+       case 16:
+       case 32:
+       case 64:
+
+               *value = 0;
+               break;
+
+       default:
+
+               return (AE_BAD_PARAMETER);
+       }
+       return (AE_OK);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_os_write_memory
+ *
+ * PARAMETERS:  address             - Physical Memory Address to write
+ *              value               - Value to write
+ *              width               - Number of bits (8,16,32, or 64)
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Write data to a physical memory address
+ *
+ *****************************************************************************/
+
+acpi_status
+acpi_os_write_memory(acpi_physical_address address, u64 value, u32 width)
+{
+
+       return (AE_OK);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_os_readable
+ *
+ * PARAMETERS:  pointer             - Area to be verified
+ *              length              - Size of area
+ *
+ * RETURN:      TRUE if readable for entire length
+ *
+ * DESCRIPTION: Verify that a pointer is valid for reading
+ *
+ *****************************************************************************/
+
+u8 acpi_os_readable(void *pointer, acpi_size length)
+{
+
+       return (TRUE);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_os_writable
+ *
+ * PARAMETERS:  pointer             - Area to be verified
+ *              length              - Size of area
+ *
+ * RETURN:      TRUE if writable for entire length
+ *
+ * DESCRIPTION: Verify that a pointer is valid for writing
+ *
+ *****************************************************************************/
+
+u8 acpi_os_writable(void *pointer, acpi_size length)
+{
+
+       return (TRUE);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_os_signal
+ *
+ * PARAMETERS:  function            - ACPICA signal function code
+ *              info                - Pointer to function-dependent structure
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Miscellaneous functions. Example implementation only.
+ *
+ *****************************************************************************/
+
+acpi_status acpi_os_signal(u32 function, void *info)
+{
+
+       switch (function) {
+       case ACPI_SIGNAL_FATAL:
+
+               break;
+
+       case ACPI_SIGNAL_BREAKPOINT:
+
+               break;
+
+       default:
+
+               break;
+       }
+
+       return (AE_OK);
+}
+
+/* Optional multi-thread support */
+
+#ifndef ACPI_SINGLE_THREADED
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_os_get_thread_id
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Id of the running thread
+ *
+ * DESCRIPTION: Get the ID of the current (running) thread
+ *
+ *****************************************************************************/
+
+acpi_thread_id acpi_os_get_thread_id(void)
+{
+       pthread_t thread;
+
+       thread = pthread_self();
+       return (ACPI_CAST_PTHREAD_T(thread));
+}
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_os_execute
+ *
+ * PARAMETERS:  type                - Type of execution
+ *              function            - Address of the function to execute
+ *              context             - Passed as a parameter to the function
+ *
+ * RETURN:      Status.
+ *
+ * DESCRIPTION: Execute a new thread
+ *
+ *****************************************************************************/
+
+acpi_status
+acpi_os_execute(acpi_execute_type type,
+               acpi_osd_exec_callback function, void *context)
+{
+       pthread_t thread;
+       int ret;
+
+       ret =
+           pthread_create(&thread, NULL, (PTHREAD_CALLBACK) function, context);
+       if (ret) {
+               acpi_os_printf("Create thread failed");
+       }
+       return (0);
+}
+
+#else                          /* ACPI_SINGLE_THREADED */
+acpi_thread_id acpi_os_get_thread_id(void)
+{
+       return (1);
+}
+
+acpi_status
+acpi_os_execute(acpi_execute_type type,
+               acpi_osd_exec_callback function, void *context)
+{
+
+       function(context);
+
+       return (AE_OK);
+}
+
+#endif                         /* ACPI_SINGLE_THREADED */
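+
+/*
+ * Illustrative usage sketch (not part of the original source): queue a
+ * deferred callback through acpi_os_execute. The callback and wrapper
+ * names are hypothetical; OSL_NOTIFY_HANDLER is one of the standard
+ * acpi_execute_type values.
+ */
+static void example_deferred_callback(void *context)
+{
+       acpi_os_printf("Deferred work ran with context %p\n", context);
+}
+
+static acpi_status example_queue_work(void *context)
+{
+       return (acpi_os_execute(OSL_NOTIFY_HANDLER,
+                               example_deferred_callback, context));
+}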
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_os_wait_events_complete
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Wait for all asynchronous events to complete. This
+ *              implementation does nothing.
+ *
+ *****************************************************************************/
+
+void acpi_os_wait_events_complete(void)
+{
+       return;
+}
index 46f519597fe5eb4a8e2f8674f2bcaef29f31eb0c..a2d37d610639afaf4fc1f23e6742ae74001f6bdd 100644 (file)
@@ -47,7 +47,6 @@
 #ifdef _DECLARE_GLOBALS
 #define EXTERN
 #define INIT_GLOBAL(a,b)        a=b
-#define DEFINE_ACPI_GLOBALS     1
 #else
 #define EXTERN                  extern
 #define INIT_GLOBAL(a,b)        a
@@ -69,7 +68,7 @@ EXTERN u8 INIT_GLOBAL(gbl_verbose_mode, FALSE);
 EXTERN u8 INIT_GLOBAL(gbl_binary_mode, FALSE);
 EXTERN u8 INIT_GLOBAL(gbl_dump_customized_tables, FALSE);
 EXTERN u8 INIT_GLOBAL(gbl_do_not_dump_xsdt, FALSE);
-EXTERN FILE INIT_GLOBAL(*gbl_output_file, NULL);
+EXTERN ACPI_FILE INIT_GLOBAL(gbl_output_file, NULL);
 EXTERN char INIT_GLOBAL(*gbl_output_filename, NULL);
 EXTERN u64 INIT_GLOBAL(gbl_rsdp_base, 0);
 
index 3cac123783663083849b8589d893b3059075552c..53cee781e24e646f98a041ff1906b1fb13158a3a 100644 (file)
@@ -69,17 +69,16 @@ u8 ap_is_valid_header(struct acpi_table_header *table)
                /* Make sure signature is all ASCII and a valid ACPI name */
 
                if (!acpi_ut_valid_acpi_name(table->signature)) {
-                       fprintf(stderr,
-                               "Table signature (0x%8.8X) is invalid\n",
-                               *(u32 *)table->signature);
+                       acpi_log_error("Table signature (0x%8.8X) is invalid\n",
+                                      *(u32 *)table->signature);
                        return (FALSE);
                }
 
                /* Check for minimum table length */
 
                if (table->length < sizeof(struct acpi_table_header)) {
-                       fprintf(stderr, "Table length (0x%8.8X) is invalid\n",
-                               table->length);
+                       acpi_log_error("Table length (0x%8.8X) is invalid\n",
+                                      table->length);
                        return (FALSE);
                }
        }
@@ -116,8 +115,8 @@ u8 ap_is_valid_checksum(struct acpi_table_header *table)
        }
 
        if (ACPI_FAILURE(status)) {
-               fprintf(stderr, "%4.4s: Warning: wrong checksum in table\n",
-                       table->signature);
+               acpi_log_error("%4.4s: Warning: wrong checksum in table\n",
+                              table->signature);
        }
 
        return (AE_OK);
@@ -196,12 +195,13 @@ ap_dump_table_buffer(struct acpi_table_header *table,
         * Note: simplest to just always emit a 64-bit address. acpi_xtract
         * utility can handle this.
         */
-       printf("%4.4s @ 0x%8.8X%8.8X\n", table->signature,
-              ACPI_FORMAT_UINT64(address));
+       acpi_ut_file_printf(gbl_output_file, "%4.4s @ 0x%8.8X%8.8X\n",
+                           table->signature, ACPI_FORMAT_UINT64(address));
 
-       acpi_ut_dump_buffer(ACPI_CAST_PTR(u8, table), table_length,
-                           DB_BYTE_DISPLAY, 0);
-       printf("\n");
+       acpi_ut_dump_buffer_to_file(gbl_output_file,
+                                   ACPI_CAST_PTR(u8, table), table_length,
+                                   DB_BYTE_DISPLAY, 0);
+       acpi_ut_file_printf(gbl_output_file, "\n");
        return (0);
 }
 
@@ -239,20 +239,20 @@ int ap_dump_all_tables(void)
                        if (status == AE_LIMIT) {
                                return (0);
                        } else if (i == 0) {
-                               fprintf(stderr,
-                                       "Could not get ACPI tables, %s\n",
-                                       acpi_format_exception(status));
+                               acpi_log_error
+                                   ("Could not get ACPI tables, %s\n",
+                                    acpi_format_exception(status));
                                return (-1);
                        } else {
-                               fprintf(stderr,
-                                       "Could not get ACPI table at index %u, %s\n",
-                                       i, acpi_format_exception(status));
+                               acpi_log_error
+                                   ("Could not get ACPI table at index %u, %s\n",
+                                    i, acpi_format_exception(status));
                                continue;
                        }
                }
 
                table_status = ap_dump_table_buffer(table, instance, address);
-               free(table);
+               ACPI_FREE(table);
 
                if (table_status) {
                        break;
@@ -288,22 +288,22 @@ int ap_dump_table_by_address(char *ascii_address)
 
        status = acpi_ut_strtoul64(ascii_address, 0, &long_address);
        if (ACPI_FAILURE(status)) {
-               fprintf(stderr, "%s: Could not convert to a physical address\n",
-                       ascii_address);
+               acpi_log_error("%s: Could not convert to a physical address\n",
+                              ascii_address);
                return (-1);
        }
 
        address = (acpi_physical_address) long_address;
        status = acpi_os_get_table_by_address(address, &table);
        if (ACPI_FAILURE(status)) {
-               fprintf(stderr, "Could not get table at 0x%8.8X%8.8X, %s\n",
-                       ACPI_FORMAT_UINT64(address),
-                       acpi_format_exception(status));
+               acpi_log_error("Could not get table at 0x%8.8X%8.8X, %s\n",
+                              ACPI_FORMAT_UINT64(address),
+                              acpi_format_exception(status));
                return (-1);
        }
 
        table_status = ap_dump_table_buffer(table, 0, address);
-       free(table);
+       ACPI_FREE(table);
        return (table_status);
 }
 
@@ -329,24 +329,24 @@ int ap_dump_table_by_name(char *signature)
        acpi_status status;
        int table_status;
 
-       if (strlen(signature) != ACPI_NAME_SIZE) {
-               fprintf(stderr,
-                       "Invalid table signature [%s]: must be exactly 4 characters\n",
-                       signature);
+       if (ACPI_STRLEN(signature) != ACPI_NAME_SIZE) {
+               acpi_log_error
+                   ("Invalid table signature [%s]: must be exactly 4 characters\n",
+                    signature);
                return (-1);
        }
 
        /* Table signatures are expected to be uppercase */
 
-       strcpy(local_signature, signature);
+       ACPI_STRCPY(local_signature, signature);
        acpi_ut_strupr(local_signature);
 
        /* To be friendly, handle tables whose signatures do not match the name */
 
        if (ACPI_COMPARE_NAME(local_signature, "FADT")) {
-               strcpy(local_signature, ACPI_SIG_FADT);
+               ACPI_STRCPY(local_signature, ACPI_SIG_FADT);
        } else if (ACPI_COMPARE_NAME(local_signature, "MADT")) {
-               strcpy(local_signature, ACPI_SIG_MADT);
+               ACPI_STRCPY(local_signature, ACPI_SIG_MADT);
        }
 
        /* Dump all instances of this signature (to handle multiple SSDTs) */
@@ -362,14 +362,14 @@ int ap_dump_table_by_name(char *signature)
                                return (0);
                        }
 
-                       fprintf(stderr,
-                               "Could not get ACPI table with signature [%s], %s\n",
-                               local_signature, acpi_format_exception(status));
+                       acpi_log_error
+                           ("Could not get ACPI table with signature [%s], %s\n",
+                            local_signature, acpi_format_exception(status));
                        return (-1);
                }
 
                table_status = ap_dump_table_buffer(table, instance, address);
-               free(table);
+               ACPI_FREE(table);
 
                if (table_status) {
                        break;
@@ -409,43 +409,21 @@ int ap_dump_table_from_file(char *pathname)
        /* File must be at least as long as the table length */
 
        if (table->length > file_size) {
-               fprintf(stderr,
-                       "Table length (0x%X) is too large for input file (0x%X) %s\n",
-                       table->length, file_size, pathname);
+               acpi_log_error
+                   ("Table length (0x%X) is too large for input file (0x%X) %s\n",
+                    table->length, file_size, pathname);
                goto exit;
        }
 
        if (gbl_verbose_mode) {
-               fprintf(stderr,
-                       "Input file:  %s contains table [%4.4s], 0x%X (%u) bytes\n",
-                       pathname, table->signature, file_size, file_size);
+               acpi_log_error
+                   ("Input file:  %s contains table [%4.4s], 0x%X (%u) bytes\n",
+                    pathname, table->signature, file_size, file_size);
        }
 
        table_status = ap_dump_table_buffer(table, 0, 0);
 
 exit:
-       free(table);
+       ACPI_FREE(table);
        return (table_status);
 }
-
-/******************************************************************************
- *
- * FUNCTION:    acpi_os* print functions
- *
- * DESCRIPTION: Used for linkage with ACPICA modules
- *
- ******************************************************************************/
-
-void ACPI_INTERNAL_VAR_XFACE acpi_os_printf(const char *fmt, ...)
-{
-       va_list args;
-
-       va_start(args, fmt);
-       vfprintf(stdout, fmt, args);
-       va_end(args);
-}
-
-void acpi_os_vprintf(const char *fmt, va_list args)
-{
-       vfprintf(stdout, fmt, args);
-}
index 4488accc010b1d4b1b0ebd57ce9a168578491519..d470046a6d81ae9e89b0da80173cb4a277bcfce2 100644 (file)
 #include "acpidump.h"
 #include "acapps.h"
 
+/* Local prototypes */
+
+static int ap_is_existing_file(char *pathname);
+
+static int ap_is_existing_file(char *pathname)
+{
+#ifndef _GNU_EFI
+       struct stat stat_info;
+
+       if (!stat(pathname, &stat_info)) {
+               acpi_log_error("Target path already exists, overwrite? [y|n] ");
+
+               if (getchar() != 'y') {
+                       return (-1);
+               }
+       }
+#endif
+
+       return (0);
+}
+
 /******************************************************************************
  *
  * FUNCTION:    ap_open_output_file
 
 int ap_open_output_file(char *pathname)
 {
-       struct stat stat_info;
-       FILE *file;
+       ACPI_FILE file;
 
        /* If file exists, prompt for overwrite */
 
-       if (!stat(pathname, &stat_info)) {
-               fprintf(stderr,
-                       "Target path already exists, overwrite? [y|n] ");
-
-               if (getchar() != 'y') {
-                       return (-1);
-               }
+       if (ap_is_existing_file(pathname) != 0) {
+               return (-1);
        }
 
        /* Point stdout to the file */
 
-       file = freopen(pathname, "w", stdout);
+       file = acpi_os_open_file(pathname, ACPI_FILE_WRITING);
        if (!file) {
-               perror("Could not open output file");
+               acpi_log_error("Could not open output file: %s\n", pathname);
                return (-1);
        }
 
@@ -106,7 +121,7 @@ int ap_write_to_binary_file(struct acpi_table_header *table, u32 instance)
 {
        char filename[ACPI_NAME_SIZE + 16];
        char instance_str[16];
-       FILE *file;
+       ACPI_FILE file;
        size_t actual;
        u32 table_length;
 
@@ -130,35 +145,37 @@ int ap_write_to_binary_file(struct acpi_table_header *table, u32 instance)
        /* Handle multiple SSDts - create different filenames for each */
 
        if (instance > 0) {
-               sprintf(instance_str, "%u", instance);
-               strcat(filename, instance_str);
+               acpi_ut_snprintf(instance_str, sizeof(instance_str), "%u",
+                                instance);
+               ACPI_STRCAT(filename, instance_str);
        }
 
-       strcat(filename, ACPI_TABLE_FILE_SUFFIX);
+       ACPI_STRCAT(filename, ACPI_TABLE_FILE_SUFFIX);
 
        if (gbl_verbose_mode) {
-               fprintf(stderr,
-                       "Writing [%4.4s] to binary file: %s 0x%X (%u) bytes\n",
-                       table->signature, filename, table->length,
-                       table->length);
+               acpi_log_error
+                   ("Writing [%4.4s] to binary file: %s 0x%X (%u) bytes\n",
+                    table->signature, filename, table->length, table->length);
        }
 
        /* Open the file and dump the entire table in binary mode */
 
-       file = fopen(filename, "wb");
+       file = acpi_os_open_file(filename,
+                                ACPI_FILE_WRITING | ACPI_FILE_BINARY);
        if (!file) {
-               perror("Could not open output file");
+               acpi_log_error("Could not open output file: %s\n", filename);
                return (-1);
        }
 
-       actual = fwrite(table, 1, table_length, file);
+       actual = acpi_os_write_file(file, table, 1, table_length);
        if (actual != table_length) {
-               perror("Error writing binary output file");
-               fclose(file);
+               acpi_log_error("Error writing binary output file: %s\n",
+                              filename);
+               acpi_os_close_file(file);
                return (-1);
        }
 
-       fclose(file);
+       acpi_os_close_file(file);
        return (0);
 }
 
@@ -179,15 +196,16 @@ struct acpi_table_header *ap_get_table_from_file(char *pathname,
                                                 u32 *out_file_size)
 {
        struct acpi_table_header *buffer = NULL;
-       FILE *file;
+       ACPI_FILE file;
        u32 file_size;
        size_t actual;
 
        /* Must use binary mode */
 
-       file = fopen(pathname, "rb");
+       file =
+           acpi_os_open_file(pathname, ACPI_FILE_READING | ACPI_FILE_BINARY);
        if (!file) {
-               perror("Could not open input file");
+               acpi_log_error("Could not open input file: %s\n", pathname);
                return (NULL);
        }
 
@@ -195,27 +213,25 @@ struct acpi_table_header *ap_get_table_from_file(char *pathname,
 
        file_size = cm_get_file_size(file);
        if (file_size == ACPI_UINT32_MAX) {
-               fprintf(stderr,
-                       "Could not get input file size: %s\n", pathname);
+               acpi_log_error("Could not get input file size: %s\n", pathname);
                goto cleanup;
        }
 
        /* Allocate a buffer for the entire file */
 
-       buffer = calloc(1, file_size);
+       buffer = ACPI_ALLOCATE_ZEROED(file_size);
        if (!buffer) {
-               fprintf(stderr,
-                       "Could not allocate file buffer of size: %u\n",
-                       file_size);
+               acpi_log_error("Could not allocate file buffer of size: %u\n",
+                              file_size);
                goto cleanup;
        }
 
        /* Read the entire file */
 
-       actual = fread(buffer, 1, file_size, file);
+       actual = acpi_os_read_file(file, buffer, 1, file_size);
        if (actual != file_size) {
-               fprintf(stderr, "Could not read input file: %s\n", pathname);
-               free(buffer);
+               acpi_log_error("Could not read input file: %s\n", pathname);
+               ACPI_FREE(buffer);
                buffer = NULL;
                goto cleanup;
        }
@@ -223,6 +239,6 @@ struct acpi_table_header *ap_get_table_from_file(char *pathname,
        *out_file_size = file_size;
 
 cleanup:
-       fclose(file);
+       acpi_os_close_file(file);
        return (buffer);
 }
index 51e8d638db18d06b38cdbca6c9285a2fb9ebd194..853b4da22c3e12ef509e26db30651cb6e12de5cd 100644 (file)
@@ -72,7 +72,7 @@ static void ap_display_usage(void);
 
 static int ap_do_options(int argc, char **argv);
 
-static void ap_insert_action(char *argument, u32 to_be_done);
+static int ap_insert_action(char *argument, u32 to_be_done);
 
 /* Table for deferred actions from command line options */
 
@@ -104,7 +104,7 @@ static void ap_display_usage(void)
        ACPI_OPTION("-v", "Display version information");
        ACPI_OPTION("-z", "Verbose mode");
 
-       printf("\nTable Options:\n");
+       ACPI_USAGE_TEXT("\nTable Options:\n");
 
        ACPI_OPTION("-a <Address>", "Get table via a physical address");
        ACPI_OPTION("-f <BinaryFile>", "Get table via a binary file");
@@ -112,9 +112,9 @@ static void ap_display_usage(void)
        ACPI_OPTION("-x", "Do not use but dump XSDT");
        ACPI_OPTION("-x -x", "Do not use or dump XSDT");
 
-       printf("\n"
-              "Invocation without parameters dumps all available tables\n"
-              "Multiple mixed instances of -a, -f, and -n are supported\n\n");
+       ACPI_USAGE_TEXT("\n"
+                       "Invocation without parameters dumps all available tables\n"
+                       "Multiple mixed instances of -a, -f, and -n are supported\n\n");
 }
 
 /******************************************************************************
@@ -124,13 +124,13 @@ static void ap_display_usage(void)
  * PARAMETERS:  argument            - Pointer to the argument for this action
  *              to_be_done          - What to do to process this action
  *
- * RETURN:      None. Exits program if action table becomes full.
+ * RETURN:      Status
  *
  * DESCRIPTION: Add an action item to the action table
  *
  ******************************************************************************/
 
-static void ap_insert_action(char *argument, u32 to_be_done)
+static int ap_insert_action(char *argument, u32 to_be_done)
 {
 
        /* Insert action and check for table overflow */
@@ -140,10 +140,12 @@ static void ap_insert_action(char *argument, u32 to_be_done)
 
        current_action++;
        if (current_action > AP_MAX_ACTIONS) {
-               fprintf(stderr, "Too many table options (max %u)\n",
-                       AP_MAX_ACTIONS);
-               exit(-1);
+               acpi_log_error("Too many table options (max %u)\n",
+                              AP_MAX_ACTIONS);
+               return (-1);
        }
+
+       return (0);
 }
 
 /******************************************************************************
@@ -166,7 +168,8 @@ static int ap_do_options(int argc, char **argv)
 
        /* Command line options */
 
-       while ((j = acpi_getopt(argc, argv, AP_SUPPORTED_OPTIONS)) != EOF)
+       while ((j =
+               acpi_getopt(argc, argv, AP_SUPPORTED_OPTIONS)) != ACPI_OPT_END)
                switch (j) {
                        /*
                         * Global options
@@ -185,12 +188,12 @@ static int ap_do_options(int argc, char **argv)
                case '?':
 
                        ap_display_usage();
-                       exit(0);
+                       return (1);
 
                case 'o':       /* Redirect output to a single file */
 
                        if (ap_open_output_file(acpi_gbl_optarg)) {
-                               exit(-1);
+                               return (-1);
                        }
                        continue;
 
@@ -200,10 +203,10 @@ static int ap_do_options(int argc, char **argv)
                            acpi_ut_strtoul64(acpi_gbl_optarg, 0,
                                              &gbl_rsdp_base);
                        if (ACPI_FAILURE(status)) {
-                               fprintf(stderr,
-                                       "%s: Could not convert to a physical address\n",
-                                       acpi_gbl_optarg);
-                               exit(-1);
+                               acpi_log_error
+                                   ("%s: Could not convert to a physical address\n",
+                                    acpi_gbl_optarg);
+                               return (-1);
                        }
                        continue;
 
@@ -223,13 +226,13 @@ static int ap_do_options(int argc, char **argv)
 
                case 'v':       /* Revision/version */
 
-                       printf(ACPI_COMMON_SIGNON(AP_UTILITY_NAME));
-                       exit(0);
+                       acpi_os_printf(ACPI_COMMON_SIGNON(AP_UTILITY_NAME));
+                       return (1);
 
                case 'z':       /* Verbose mode */
 
                        gbl_verbose_mode = TRUE;
-                       fprintf(stderr, ACPI_COMMON_SIGNON(AP_UTILITY_NAME));
+                       acpi_log_error(ACPI_COMMON_SIGNON(AP_UTILITY_NAME));
                        continue;
 
                        /*
@@ -237,32 +240,40 @@ static int ap_do_options(int argc, char **argv)
                         */
                case 'a':       /* Get table by physical address */
 
-                       ap_insert_action(acpi_gbl_optarg,
-                                        AP_DUMP_TABLE_BY_ADDRESS);
+                       if (ap_insert_action
+                           (acpi_gbl_optarg, AP_DUMP_TABLE_BY_ADDRESS)) {
+                               return (-1);
+                       }
                        break;
 
                case 'f':       /* Get table from a file */
 
-                       ap_insert_action(acpi_gbl_optarg,
-                                        AP_DUMP_TABLE_BY_FILE);
+                       if (ap_insert_action
+                           (acpi_gbl_optarg, AP_DUMP_TABLE_BY_FILE)) {
+                               return (-1);
+                       }
                        break;
 
                case 'n':       /* Get table by input name (signature) */
 
-                       ap_insert_action(acpi_gbl_optarg,
-                                        AP_DUMP_TABLE_BY_NAME);
+                       if (ap_insert_action
+                           (acpi_gbl_optarg, AP_DUMP_TABLE_BY_NAME)) {
+                               return (-1);
+                       }
                        break;
 
                default:
 
                        ap_display_usage();
-                       exit(-1);
+                       return (-1);
                }
 
        /* If there are no actions, this means "get/dump all tables" */
 
        if (current_action == 0) {
-               ap_insert_action(NULL, AP_DUMP_ALL_TABLES);
+               if (ap_insert_action(NULL, AP_DUMP_ALL_TABLES)) {
+                       return (-1);
+               }
        }
 
        return (0);
@@ -280,7 +291,11 @@ static int ap_do_options(int argc, char **argv)
  *
  ******************************************************************************/
 
+#ifndef _GNU_EFI
 int ACPI_SYSTEM_XFACE main(int argc, char *argv[])
+#else
+int ACPI_SYSTEM_XFACE acpi_main(int argc, char *argv[])
+#endif
 {
        int status = 0;
        struct ap_dump_action *action;
@@ -288,11 +303,17 @@ int ACPI_SYSTEM_XFACE main(int argc, char *argv[])
        u32 i;
 
        ACPI_DEBUG_INITIALIZE();        /* For debug version only */
+       acpi_os_initialize();
+       gbl_output_file = ACPI_FILE_OUT;
 
        /* Process command line options */
 
-       if (ap_do_options(argc, argv)) {
-               return (-1);
+       status = ap_do_options(argc, argv);
+       if (status > 0) {
+               return (0);
+       }
+       if (status < 0) {
+               return (status);
        }
 
        /* Get/dump ACPI table(s) as requested */
@@ -322,9 +343,8 @@ int ACPI_SYSTEM_XFACE main(int argc, char *argv[])
 
                default:
 
-                       fprintf(stderr,
-                               "Internal error, invalid action: 0x%X\n",
-                               action->to_be_done);
+                       acpi_log_error("Internal error, invalid action: 0x%X\n",
+                                      action->to_be_done);
                        return (-1);
                }
 
@@ -333,18 +353,18 @@ int ACPI_SYSTEM_XFACE main(int argc, char *argv[])
                }
        }
 
-       if (gbl_output_file) {
+       if (gbl_output_filename) {
                if (gbl_verbose_mode) {
 
                        /* Summary for the output file */
 
                        file_size = cm_get_file_size(gbl_output_file);
-                       fprintf(stderr,
-                               "Output file %s contains 0x%X (%u) bytes\n\n",
-                               gbl_output_filename, file_size, file_size);
+                       acpi_log_error
+                           ("Output file %s contains 0x%X (%u) bytes\n\n",
+                            gbl_output_filename, file_size, file_size);
                }
 
-               fclose(gbl_output_file);
+               acpi_os_close_file(gbl_output_file);
        }
 
        return (status);