Merge branch 'pm-qos'
author     Rafael J. Wysocki <rjw@sisk.pl>
           Mon, 24 Sep 2012 19:41:31 +0000 (21:41 +0200)
committer  Rafael J. Wysocki <rjw@sisk.pl>
           Mon, 24 Sep 2012 19:41:31 +0000 (21:41 +0200)
* pm-qos:
  Revert "PM QoS: Use spinlock in the per-device PM QoS constraints code"

572 files changed:
Documentation/ABI/testing/sysfs-bus-pci
Documentation/ABI/testing/sysfs-devices-system-cpu
Documentation/cpu-freq/boost.txt [new file with mode: 0644]
Documentation/cpuidle/sysfs.txt
Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt [new file with mode: 0644]
Documentation/devicetree/bindings/power/opp.txt [new file with mode: 0644]
Documentation/feature-removal-schedule.txt
Documentation/i2c/busses/i2c-i801
MAINTAINERS
Makefile
arch/arm/Kconfig
arch/arm/Kconfig.debug
arch/arm/Makefile
arch/arm/boot/compressed/head.S
arch/arm/boot/dts/at91sam9g25ek.dts
arch/arm/configs/armadillo800eva_defconfig
arch/arm/include/asm/assembler.h
arch/arm/include/asm/dma-mapping.h
arch/arm/include/asm/memory.h
arch/arm/include/asm/tlb.h
arch/arm/include/asm/uaccess.h
arch/arm/kernel/hw_breakpoint.c
arch/arm/kernel/smp.c
arch/arm/kernel/traps.c
arch/arm/lib/delay.c
arch/arm/lib/getuser.S
arch/arm/lib/putuser.S
arch/arm/mach-at91/at91rm9200_time.c
arch/arm/mach-at91/at91sam9260_devices.c
arch/arm/mach-at91/at91sam9261_devices.c
arch/arm/mach-at91/at91sam9263_devices.c
arch/arm/mach-at91/at91sam9g45_devices.c
arch/arm/mach-at91/at91sam9rl_devices.c
arch/arm/mach-at91/clock.c
arch/arm/mach-gemini/irq.c
arch/arm/mach-imx/clk-imx25.c
arch/arm/mach-imx/clk-imx35.c
arch/arm/mach-kirkwood/common.c
arch/arm/mach-kirkwood/db88f6281-bp-setup.c
arch/arm/mach-omap2/Kconfig
arch/arm/mach-omap2/Makefile
arch/arm/mach-omap2/clock33xx_data.c
arch/arm/mach-omap2/clockdomain2xxx_3xxx.c
arch/arm/mach-omap2/cm-regbits-34xx.h
arch/arm/mach-omap2/omap-wakeupgen.c
arch/arm/mach-omap2/omap_hwmod.c
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
arch/arm/mach-omap2/omap_hwmod_44xx_data.c
arch/arm/mach-omap2/timer.c
arch/arm/mach-shmobile/Makefile
arch/arm/mach-shmobile/board-ap4evb.c
arch/arm/mach-shmobile/board-armadillo800eva.c
arch/arm/mach-shmobile/board-mackerel.c
arch/arm/mach-shmobile/board-marzen.c
arch/arm/mach-shmobile/common.c [deleted file]
arch/arm/mach-shmobile/cpuidle.c
arch/arm/mach-shmobile/include/mach/common.h
arch/arm/mach-shmobile/include/mach/pm-rmobile.h
arch/arm/mach-shmobile/include/mach/r8a7740.h
arch/arm/mach-shmobile/include/mach/r8a7779.h
arch/arm/mach-shmobile/include/mach/sh7372.h
arch/arm/mach-shmobile/intc-sh73a0.c
arch/arm/mach-shmobile/pm-r8a7740.c
arch/arm/mach-shmobile/pm-r8a7779.c
arch/arm/mach-shmobile/pm-rmobile.c
arch/arm/mach-shmobile/pm-sh7372.c
arch/arm/mach-shmobile/setup-r8a7740.c
arch/arm/mach-shmobile/setup-r8a7779.c
arch/arm/mach-shmobile/setup-sh7372.c
arch/arm/mm/context.c
arch/arm/mm/dma-mapping.c
arch/arm/mm/mm.h
arch/arm/mm/mmu.c
arch/arm/plat-omap/sram.c
arch/blackfin/Kconfig
arch/blackfin/Makefile
arch/blackfin/include/asm/smp.h
arch/blackfin/mach-common/smp.c
arch/powerpc/include/asm/processor.h
arch/powerpc/kernel/asm-offsets.c
arch/powerpc/kernel/dbell.c
arch/powerpc/kernel/entry_64.S
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kernel/idle_power7.S
arch/powerpc/kernel/process.c
arch/powerpc/kernel/smp.c
arch/powerpc/kernel/sysfs.c
arch/powerpc/kernel/time.c
arch/powerpc/kernel/traps.c
arch/powerpc/lib/code-patching.c
arch/powerpc/mm/numa.c
arch/powerpc/platforms/powernv/smp.c
arch/powerpc/sysdev/xics/icp-hv.c
arch/s390/oprofile/init.c
arch/um/os-Linux/time.c
arch/x86/include/asm/msr-index.h
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/cpu/perf_event_intel_lbr.c
arch/x86/kernel/microcode_core.c
arch/x86/kvm/i8259.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/xen/mmu.c
arch/x86/xen/p2m.c
crypto/authenc.c
drivers/acpi/bus.c
drivers/acpi/power.c
drivers/acpi/processor_driver.c
drivers/acpi/processor_idle.c
drivers/acpi/processor_perflib.c
drivers/ata/ahci.c
drivers/base/dma-contiguous.c
drivers/base/platform.c
drivers/base/power/domain.c
drivers/base/power/main.c
drivers/base/power/opp.c
drivers/base/power/power.h
drivers/base/power/runtime.c
drivers/base/power/wakeup.c
drivers/bluetooth/ath3k.c
drivers/bluetooth/btusb.c
drivers/clocksource/sh_cmt.c
drivers/clocksource/sh_mtu2.c
drivers/clocksource/sh_tmu.c
drivers/cpufreq/Kconfig
drivers/cpufreq/Kconfig.x86
drivers/cpufreq/Makefile
drivers/cpufreq/acpi-cpufreq.c
drivers/cpufreq/cpufreq-cpu0.c [new file with mode: 0644]
drivers/cpufreq/cpufreq_conservative.c
drivers/cpufreq/cpufreq_ondemand.c
drivers/cpufreq/longhaul.h
drivers/cpufreq/omap-cpufreq.c
drivers/cpufreq/powernow-k8.c
drivers/cpufreq/powernow-k8.h
drivers/cpuidle/driver.c
drivers/cpuidle/governors/ladder.c
drivers/crypto/caam/key_gen.c
drivers/extcon/extcon-max77693.c
drivers/gpio/Kconfig
drivers/gpio/gpio-em.c
drivers/gpio/gpio-rdc321x.c
drivers/gpio/gpiolib-of.c
drivers/gpu/drm/ast/ast_drv.c
drivers/gpu/drm/ast/ast_mode.c
drivers/gpu/drm/cirrus/cirrus_drv.c
drivers/gpu/drm/exynos/Kconfig
drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
drivers/gpu/drm/exynos/exynos_drm_drv.c
drivers/gpu/drm/exynos/exynos_drm_fimd.c
drivers/gpu/drm/exynos/exynos_drm_g2d.c
drivers/gpu/drm/exynos/exynos_drm_gem.c
drivers/gpu/drm/exynos/exynos_drm_hdmi.c
drivers/gpu/drm/exynos/exynos_drm_plane.c
drivers/gpu/drm/exynos/exynos_drm_vidi.c
drivers/gpu/drm/exynos/exynos_hdmi.c
drivers/gpu/drm/exynos/exynos_mixer.c
drivers/gpu/drm/gma500/oaktrail_device.c
drivers/gpu/drm/i810/i810_dma.c
drivers/gpu/drm/i810/i810_drv.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_panel.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/mgag200/mgag200_drv.c
drivers/gpu/drm/nouveau/nouveau_display.c
drivers/gpu/drm/nouveau/nv50_gpio.c
drivers/gpu/drm/nouveau/nvd0_display.c
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/radeon_fence.c
drivers/gpu/drm/savage/savage_drv.c
drivers/gpu/drm/sis/sis_drv.c
drivers/gpu/drm/tdfx/tdfx_drv.c
drivers/gpu/drm/udl/udl_drv.c
drivers/gpu/drm/via/via_drv.c
drivers/gpu/drm/vmwgfx/Kconfig
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
drivers/hid/hid-core.c
drivers/hid/hid-logitech-dj.c
drivers/hid/usbhid/hid-quirks.c
drivers/hwmon/ina2xx.c
drivers/hwmon/twl4030-madc-hwmon.c
drivers/i2c/algos/i2c-algo-pca.c
drivers/i2c/busses/Kconfig
drivers/i2c/busses/Makefile
drivers/i2c/busses/i2c-designware-core.c
drivers/i2c/busses/i2c-i801.c
drivers/i2c/busses/i2c-mxs.c
drivers/i2c/busses/i2c-pnx.c
drivers/i2c/i2c-core.c
drivers/iio/adc/at91_adc.c
drivers/input/keyboard/imx_keypad.c
drivers/input/serio/i8042-x86ia64io.h
drivers/input/tablet/wacom_wac.c
drivers/input/touchscreen/edt-ft5x06.c
drivers/isdn/hardware/mISDN/avmfritz.c
drivers/isdn/hardware/mISDN/hfcmulti.c
drivers/isdn/hardware/mISDN/mISDNipac.c
drivers/isdn/hardware/mISDN/mISDNisar.c
drivers/isdn/hardware/mISDN/netjet.c
drivers/isdn/hardware/mISDN/w6692.c
drivers/isdn/mISDN/hwchannel.c
drivers/mfd/88pm800.c
drivers/mfd/88pm805.c
drivers/mfd/88pm860x-core.c
drivers/mfd/aat2870-core.c
drivers/mfd/ab3100-core.c
drivers/mfd/ab8500-core.c
drivers/mfd/arizona-core.c
drivers/mfd/asic3.c
drivers/mfd/cs5535-mfd.c
drivers/mfd/da9052-core.c
drivers/mfd/davinci_voicecodec.c
drivers/mfd/db8500-prcmu.c
drivers/mfd/htc-pasic3.c
drivers/mfd/intel_msic.c
drivers/mfd/janz-cmodio.c
drivers/mfd/jz4740-adc.c
drivers/mfd/lm3533-core.c
drivers/mfd/lpc_ich.c
drivers/mfd/lpc_sch.c
drivers/mfd/max77686.c
drivers/mfd/max77693-irq.c
drivers/mfd/max77693.c
drivers/mfd/max8925-core.c
drivers/mfd/max8997.c
drivers/mfd/max8998.c
drivers/mfd/mc13xxx-core.c
drivers/mfd/mfd-core.c
drivers/mfd/palmas.c
drivers/mfd/rc5t583.c
drivers/mfd/rdc321x-southbridge.c
drivers/mfd/sec-core.c
drivers/mfd/sta2x11-mfd.c
drivers/mfd/stmpe.c
drivers/mfd/t7l66xb.c
drivers/mfd/tc3589x.c
drivers/mfd/tc6387xb.c
drivers/mfd/tc6393xb.c
drivers/mfd/ti-ssp.c
drivers/mfd/timberdale.c
drivers/mfd/tps6105x.c
drivers/mfd/tps6507x.c
drivers/mfd/tps65090.c
drivers/mfd/tps65217.c
drivers/mfd/tps6586x.c
drivers/mfd/tps65910.c
drivers/mfd/tps65912-core.c
drivers/mfd/twl4030-audio.c
drivers/mfd/twl6040-core.c
drivers/mfd/vx855.c
drivers/mfd/wl1273-core.c
drivers/mfd/wm831x-core.c
drivers/mfd/wm8400-core.c
drivers/mfd/wm8994-core.c
drivers/mmc/card/block.c
drivers/mmc/host/atmel-mci.c
drivers/mmc/host/bfin_sdh.c
drivers/mmc/host/dw_mmc.c
drivers/mmc/host/mxs-mmc.c
drivers/mmc/host/omap.c
drivers/mmc/host/sdhci-esdhc.h
drivers/mtd/ubi/vtbl.c
drivers/net/can/mcp251x.c
drivers/net/can/sja1000/sja1000_platform.c
drivers/net/can/softing/softing_fw.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
drivers/net/ethernet/cirrus/cs89x0.c
drivers/net/ethernet/emulex/benet/be_cmds.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/i825xx/znet.c
drivers/net/ethernet/ibm/ibmveth.c
drivers/net/ethernet/intel/e1000e/e1000.h
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx4/mcg.c
drivers/net/ethernet/mellanox/mlx4/mlx4.h
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
drivers/net/ethernet/seeq/sgiseeq.c
drivers/net/ethernet/sfc/ethtool.c
drivers/net/ethernet/stmicro/stmmac/common.h
drivers/net/ethernet/stmicro/stmmac/descs.h
drivers/net/ethernet/stmicro/stmmac/descs_com.h
drivers/net/ethernet/stmicro/stmmac/dwmac100.h
drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
drivers/net/ethernet/stmicro/stmmac/mmc.h
drivers/net/ethernet/stmicro/stmmac/mmc_core.c
drivers/net/ethernet/stmicro/stmmac/stmmac.h
drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h
drivers/net/ethernet/ti/davinci_mdio.c
drivers/net/fddi/skfp/pmf.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/sierra_net.c
drivers/net/usb/usbnet.c
drivers/net/wan/ixp4xx_hss.c
drivers/net/wireless/ath/ath5k/eeprom.c
drivers/net/wireless/ath/ath5k/eeprom.h
drivers/net/wireless/ath/ath9k/ar9003_paprd.c
drivers/net/wireless/ath/ath9k/ar9003_phy.h
drivers/net/wireless/ath/ath9k/gpio.c
drivers/net/wireless/ath/ath9k/hw.c
drivers/net/wireless/ath/ath9k/hw.h
drivers/net/wireless/ath/ath9k/link.c
drivers/net/wireless/ath/ath9k/xmit.c
drivers/net/wireless/brcm80211/brcmfmac/usb.c
drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
drivers/net/wireless/ipw2x00/ipw2100.c
drivers/net/wireless/iwlwifi/dvm/debugfs.c
drivers/net/wireless/iwlwifi/pcie/internal.h
drivers/net/wireless/iwlwifi/pcie/rx.c
drivers/net/wireless/iwlwifi/pcie/trans.c
drivers/net/wireless/libertas/if_sdio.c
drivers/net/wireless/mwifiex/cmdevt.c
drivers/net/wireless/rt2x00/rt2400pci.c
drivers/net/wireless/rt2x00/rt2400pci.h
drivers/net/wireless/rt2x00/rt2500pci.c
drivers/net/wireless/rt2x00/rt2500usb.c
drivers/net/wireless/rt2x00/rt2500usb.h
drivers/net/wireless/rt2x00/rt2800lib.c
drivers/net/wireless/rt2x00/rt2800pci.c
drivers/net/wireless/rt2x00/rt2800usb.c
drivers/net/wireless/rt2x00/rt2x00dev.c
drivers/net/wireless/rt2x00/rt61pci.c
drivers/net/wireless/rt2x00/rt61pci.h
drivers/net/wireless/rt2x00/rt73usb.c
drivers/net/wireless/rt2x00/rt73usb.h
drivers/net/xen-netfront.c
drivers/pci/pci-driver.c
drivers/pci/pci-sysfs.c
drivers/pci/pci.c
drivers/pci/pcie/portdrv_pci.c
drivers/pci/probe.c
drivers/platform/x86/acer-wmi.c
drivers/platform/x86/apple-gmux.c
drivers/platform/x86/asus-laptop.c
drivers/platform/x86/asus-wmi.c
drivers/platform/x86/eeepc-laptop.c
drivers/platform/x86/samsung-laptop.c
drivers/platform/x86/thinkpad_acpi.c
drivers/pwm/pwm-tiecap.c
drivers/pwm/pwm-tiehrpwm.c
drivers/regulator/tps65217-regulator.c
drivers/rtc/rtc-at91sam9.c
drivers/scsi/megaraid/megaraid_sas_base.c
drivers/scsi/mpt2sas/mpt2sas_base.c
drivers/scsi/scsi_error.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_scan.c
drivers/staging/android/android_alarm.h
drivers/staging/comedi/drivers/amplc_dio200.c
drivers/staging/comedi/drivers/amplc_pc236.c
drivers/staging/comedi/drivers/amplc_pc263.c
drivers/staging/comedi/drivers/amplc_pci224.c
drivers/staging/comedi/drivers/amplc_pci230.c
drivers/staging/comedi/drivers/das08.c
drivers/staging/iio/accel/lis3l02dq_ring.c
drivers/staging/iio/adc/ad7192.c
drivers/staging/iio/gyro/adis16260_core.c
drivers/staging/iio/imu/adis16400_core.c
drivers/staging/iio/meter/ade7753.c
drivers/staging/iio/meter/ade7754.c
drivers/staging/iio/meter/ade7759.c
drivers/staging/nvec/nvec.c
drivers/staging/omapdrm/omap_connector.c
drivers/staging/ozwpan/ozcdev.c
drivers/staging/rtl8712/recv_linux.c
drivers/staging/vt6656/dpc.c
drivers/staging/vt6656/rxtx.c
drivers/staging/wlan-ng/cfg80211.c
drivers/staging/zcache/zcache-main.c
drivers/target/iscsi/iscsi_target_login.c
drivers/target/target_core_alua.c
drivers/target/target_core_device.c
drivers/target/target_core_iblock.c
drivers/target/target_core_pr.c
drivers/target/target_core_pscsi.c
drivers/target/target_core_spc.c
drivers/target/target_core_transport.c
drivers/tty/serial/imx.c
drivers/usb/chipidea/udc.c
drivers/usb/class/cdc-wdm.c
drivers/usb/core/quirks.c
drivers/usb/dwc3/core.c
drivers/usb/dwc3/ep0.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/at91_udc.c
drivers/usb/gadget/dummy_hcd.c
drivers/usb/gadget/f_fs.c
drivers/usb/gadget/s3c-hsotg.c
drivers/usb/gadget/u_serial.c
drivers/usb/host/ehci-q.c
drivers/usb/host/ohci-at91.c
drivers/usb/host/pci-quirks.c
drivers/usb/host/pci-quirks.h
drivers/usb/host/xhci-hub.c
drivers/usb/host/xhci-plat.c
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/musb/musb_host.c
drivers/usb/musb/musbhsdma.c
drivers/usb/musb/tusb6010.c
drivers/usb/renesas_usbhs/fifo.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/option.c
drivers/video/auo_k190x.c
drivers/video/console/bitblit.c
drivers/video/console/fbcon.c
drivers/video/mb862xx/mb862xxfbdrv.c
drivers/video/omap2/dss/sdi.c
drivers/video/omap2/omapfb/omapfb-main.c
drivers/xen/swiotlb-xen.c
drivers/xen/xen-acpi-processor.c
drivers/xen/xen-pciback/pci_stub.c
fs/btrfs/qgroup.c
fs/cifs/cifssmb.c
fs/cifs/dir.c
fs/cifs/file.c
fs/cifs/inode.c
fs/cifs/link.c
fs/cifs/smb2misc.c
fs/cifs/smb2pdu.h
fs/cifs/transport.c
fs/ecryptfs/file.c
fs/ecryptfs/inode.c
fs/ecryptfs/main.c
fs/ext3/inode.c
fs/fuse/control.c
fs/fuse/cuse.c
fs/fuse/dev.c
fs/fuse/inode.c
fs/gfs2/file.c
fs/gfs2/inode.c
fs/gfs2/rgrp.c
fs/nfs/file.c
fs/nfs/inode.c
fs/nfs/nfs3proc.c
fs/nfs/nfs4file.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4xdr.c
fs/nfs/super.c
fs/stat.c
fs/udf/file.c
include/acpi/processor.h
include/drm/drm_fourcc.h
include/linux/atmel-ssc.h
include/linux/clockchips.h
include/linux/device.h
include/linux/i2c-pnx.h
include/linux/kernel.h
include/linux/kobject.h
include/linux/mISDNhw.h
include/linux/mfd/core.h
include/linux/mfd/tps65217.h
include/linux/mlx4/device.h
include/linux/mmc/card.h
include/linux/nfs_fs.h
include/linux/nfs_xdr.h
include/linux/opp.h
include/linux/pci_ids.h
include/linux/perf_event.h
include/linux/pm.h
include/linux/pm_domain.h
include/linux/sched.h
include/linux/sunrpc/xprt.h
include/net/bluetooth/smp.h
include/net/netfilter/nf_conntrack_ecache.h
include/net/xfrm.h
include/target/target_core_backend.h
include/target/target_core_base.h
kernel/events/core.c
kernel/events/hw_breakpoint.c
kernel/power/Kconfig
kernel/power/poweroff.c
kernel/power/process.c
kernel/sched/core.c
kernel/sched/fair.c
kernel/sched/rt.c
kernel/sched/sched.h
kernel/time/clockevents.c
kernel/time/tick-sched.c
kernel/time/timekeeping.c
kernel/workqueue.c
lib/digsig.c
mm/memblock.c
mm/mempolicy.c
net/bluetooth/hci_conn.c
net/bluetooth/l2cap_core.c
net/bluetooth/l2cap_sock.c
net/bluetooth/smp.c
net/bridge/netfilter/ebt_log.c
net/caif/cfsrvl.c
net/core/dev.c
net/core/netpoll.c
net/core/pktgen.c
net/core/sock.c
net/ipv4/ipmr.c
net/ipv4/netfilter/nf_nat_sip.c
net/ipv4/route.c
net/ipv4/tcp_input.c
net/ipv4/udp.c
net/ipv6/esp6.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/l2tp/l2tp_core.c
net/l2tp/l2tp_core.h
net/l2tp/l2tp_eth.c
net/mac80211/cfg.c
net/mac80211/mlme.c
net/mac80211/tx.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_conntrack_proto_tcp.c
net/netfilter/nfnetlink_log.c
net/netfilter/xt_LOG.c
net/netlink/af_netlink.c
net/netrom/af_netrom.c
net/openvswitch/actions.c
net/openvswitch/datapath.c
net/openvswitch/flow.h
net/packet/af_packet.c
net/sched/sch_cbq.c
net/sched/sch_fq_codel.c
net/sched/sch_gred.c
net/sctp/output.c
net/socket.c
net/sunrpc/xprt.c
net/sunrpc/xprtrdma/transport.c
net/sunrpc/xprtsock.c
net/wireless/nl80211.c
net/xfrm/xfrm_input.c
net/xfrm/xfrm_replay.c
net/xfrm/xfrm_state.c
scripts/Makefile.fwinst
scripts/link-vmlinux.sh
sound/core/compress_offload.c
sound/pci/hda/hda_codec.c
sound/pci/hda/hda_codec.h
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_sigmatel.c
sound/pci/ice1712/prodigy_hifi.c
sound/soc/codecs/arizona.c
sound/soc/codecs/mc13783.c
sound/soc/codecs/wm8904.c
sound/soc/fsl/imx-sgtl5000.c
sound/soc/omap/am3517evm.c
sound/soc/samsung/dma.c
sound/soc/soc-dapm.c
sound/soc/spear/spear_pcm.c
sound/soc/tegra/tegra_alc5632.c
sound/soc/tegra/tegra_pcm.c
sound/soc/ux500/ux500_msp_i2s.c
sound/usb/card.c
sound/usb/endpoint.c
sound/usb/endpoint.h
sound/usb/pcm.c

index 34f51100f0299cb5a96a77984def99dbfdd9367d..dff1f48d252d8f0d36aa31cccf6514cb02b3d333 100644 (file)
@@ -210,3 +210,15 @@ Users:
                firmware assigned instance number of the PCI
                device that can help in understanding the firmware
                intended order of the PCI device.
+
+What:          /sys/bus/pci/devices/.../d3cold_allowed
+Date:          July 2012
+Contact:       Huang Ying <ying.huang@intel.com>
+Description:
+               d3cold_allowed is a bit that controls whether the corresponding
+               PCI device can be put into the D3Cold state.  If it is cleared,
+               the device will never be put into D3Cold.  If it is set, the
+               device may be put into D3Cold if the other requirements are
+               satisfied as well.  Reading this attribute shows the current
+               value of the d3cold_allowed bit.  Writing this attribute sets
+               the value of the d3cold_allowed bit.
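The attribute above is a plain sysfs boolean, so it can be driven from user space without any special tooling. Below is a minimal sketch; the helper name and the PCI address are only placeholders.

    /*
     * Minimal user-space sketch of the d3cold_allowed attribute described
     * above.  The PCI address used here is only a placeholder.
     */
    #include <stdio.h>

    static int set_d3cold_allowed(const char *pci_addr, int allowed)
    {
            char path[256];
            FILE *f;

            snprintf(path, sizeof(path),
                     "/sys/bus/pci/devices/%s/d3cold_allowed", pci_addr);
            f = fopen(path, "w");
            if (!f)
                    return -1;
            fprintf(f, "%d\n", allowed ? 1 : 0);
            fclose(f);
            return 0;
    }

    int main(void)
    {
            /* Placeholder device address; pick a real one from lspci -D. */
            return set_d3cold_allowed("0000:00:1c.0", 0) ? 1 : 0;
    }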
index 5dab36448b44b84bf869b9316d50af47f00328b6..6943133afcb8388916a87e2bd66923fe26a97906 100644 (file)
@@ -176,3 +176,14 @@ Description:       Disable L3 cache indices
                All AMD processors with L3 caches provide this functionality.
                For details, see BKDGs at
                http://developer.amd.com/documentation/guides/Pages/default.aspx
+
+
+What:          /sys/devices/system/cpu/cpufreq/boost
+Date:          August 2012
+Contact:       Linux kernel mailing list <linux-kernel@vger.kernel.org>
+Description:   Processor frequency boosting control
+
+               This switch controls the boost setting for the whole system.
+               Boosting allows the CPU and the firmware to run at a frequency
+               beyond its nominal limit.
+               More details can be found in Documentation/cpu-freq/boost.txt
diff --git a/Documentation/cpu-freq/boost.txt b/Documentation/cpu-freq/boost.txt
new file mode 100644 (file)
index 0000000..9b4edfc
--- /dev/null
@@ -0,0 +1,93 @@
+Processor boosting control
+
+       - information for users -
+
+Quick guide for the impatient:
+--------------------
+/sys/devices/system/cpu/cpufreq/boost
+controls the boost setting for the whole system. You can read and write
+that file with either "0" (boosting disabled) or "1" (boosting allowed).
+Reading or writing 1 does not mean that the system is boosting at this
+very moment, but only that the CPU _may_ raise the frequency at its
+discretion.
+--------------------
+
+Introduction
+-------------
+Some CPUs support a functionality to raise the operating frequency of
+some cores in a multi-core package if certain conditions apply, mostly
+if the whole chip is not fully utilized and below its intended thermal
+budget. This is done without operating system control by a combination
+of hardware and firmware.
+On Intel CPUs this is called "Turbo Boost", AMD calls it "Turbo-Core",
+and technical documentation refers to it as "Core performance boost".
+In Linux we use the term "boost" for convenience.
+
+Rationale for disable switch
+----------------------------
+
+Though the idea is to just give better performance without any user
+intervention, sometimes the need arises to disable this functionality.
+Most systems offer a switch in the (BIOS) firmware to disable the
+functionality entirely, but more fine-grained and dynamic control would
+be desirable:
+1. While running benchmarks, reproducible results are important. Since
+   the boosting functionality depends on the load of the whole package,
+   single thread performance can vary. By explicitly disabling the boost
+   functionality at least for the benchmark's run-time the system will run
+   at a fixed frequency and results are reproducible again.
+2. To examine the impact of the boosting functionality it is helpful
+   to do tests with and without boosting.
+3. Boosting means overclocking the processor, though under controlled
+   conditions. By raising the frequency and the voltage the processor
+   will consume more power than without the boosting, which may be
+   undesirable for instance for mobile users. Disabling boosting may
+   save power here, though this depends on the workload.
+
+
+User controlled switch
+----------------------
+
+To allow the user to toggle the boosting functionality, the acpi-cpufreq
+driver exports a sysfs knob to disable it. There is a file:
+/sys/devices/system/cpu/cpufreq/boost
+which can either read "0" (boosting disabled) or "1" (boosting enabled).
+Reading the file is always supported, even if the processor does not
+support boosting. In this case the file will be read-only and always
+reads as "0". Explicitly changing the permissions and writing to that
+file anyway will return EINVAL.
+
+On supported CPUs one can write either a "0" or a "1" into this file.
+This will either disable the boost functionality on all cores in the
+whole system (0) or will allow the hardware to boost at will (1).
+
+Writing a "1" does not explicitly boost the system, but just allows the
+CPU (and the firmware) to boost at their discretion. Some implementations
+take external factors like the chip's temperature into account, so
+boosting once does not necessarily mean that it will occur every time
+even when using the exact same software setup.
+
+
+AMD legacy cpb switch
+---------------------
+The AMD powernow-k8 driver used to support a very similar switch to
+disable or enable the "Core Performance Boost" feature of some AMD CPUs.
+This switch was instantiated in each CPU's cpufreq directory
+(/sys/devices/system/cpu[0-9]*/cpufreq) and was called "cpb".
+Though the per-CPU existence hints at more fine-grained control, the
+actual implementation only supported system-global switch semantics,
+which were simply reflected into each CPU's file. Writing a 0 or 1 into
+any of these files would pull the other CPUs to the same state.
+For compatibility reasons this file and its behavior are still supported
+on AMD CPUs, though it is now protected by a config switch
+(X86_ACPI_CPUFREQ_CPB). On Intel CPUs this file will never be created,
+even with the config option set.
+This functionality is considered legacy and will be removed in some future
+kernel version.
+
+More fine grained boosting control
+----------------------------------
+
+Technically it is possible to switch the boosting functionality at least
+on a per package basis, for some CPUs even per core. Currently the driver
+does not support it, but this may be implemented in the future.
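To make the switch described above concrete, here is a minimal user-space sketch that reads and then toggles the global boost file. The program structure is illustrative; on CPUs without boost support the write is expected to fail, as the text explains.

    /*
     * Illustrative sketch: query and flip the system-wide boost switch.
     * The path comes from the documentation above; on CPUs without boost
     * support the file always reads "0" and writes fail.
     */
    #include <stdio.h>

    #define BOOST_PATH "/sys/devices/system/cpu/cpufreq/boost"

    int main(void)
    {
            FILE *f = fopen(BOOST_PATH, "r");
            int enabled = 0;

            if (!f) {
                    perror("boost attribute not available");
                    return 1;
            }
            if (fscanf(f, "%d", &enabled) != 1)
                    enabled = 0;
            fclose(f);
            printf("boost currently %s\n", enabled ? "allowed" : "disabled");

            /* Toggle the setting; needs root and a CPU with boost support. */
            f = fopen(BOOST_PATH, "w");
            if (f) {
                    fprintf(f, "%d\n", !enabled);
                    fclose(f);
            }
            return 0;
    }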
index 9d28a3406e745589383021732fe7941ed9aad1cb..b6f44f490ed7839f0963acfc0c53ed2537fa35e2 100644 (file)
@@ -76,9 +76,17 @@ total 0
 
 
 * desc : Small description about the idle state (string)
-* disable : Option to disable this idle state (bool)
+* disable : Option to disable this idle state (bool) -> see note below
 * latency : Latency to exit out of this idle state (in microseconds)
 * name : Name of the idle state (string)
 * power : Power consumed while in this idle state (in milliwatts)
 * time : Total time spent in this idle state (in microseconds)
 * usage : Number of times this state was entered (count)
+
+Note:
+The behavior and the effect of the disable variable depend on the
+implementation of a particular governor. In the ladder governor, for
+example, it is not coherent, i.e. if one disables a light state, then
+all deeper states are disabled as well, but the disable variable does
+not reflect it. Likewise, if one enables a deep state while a lighter
+state is still disabled, then this has no effect.
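For illustration, a small sketch of driving the per-state disable attribute from user space; the sysfs path layout follows the state directories documented above, and the CPU/state indices and helper name are placeholders.

    /*
     * Illustrative sketch: disable one idle state on one CPU.  The CPU and
     * state indices are placeholders; remember the ladder-governor caveat
     * described above (deeper states are effectively disabled as well).
     */
    #include <stdio.h>

    static int set_idle_state_disabled(int cpu, int state, int disabled)
    {
            char path[128];
            FILE *f;

            snprintf(path, sizeof(path),
                     "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/disable",
                     cpu, state);
            f = fopen(path, "w");
            if (!f)
                    return -1;
            fprintf(f, "%d\n", disabled ? 1 : 0);
            fclose(f);
            return 0;
    }

For example, set_idle_state_disabled(0, 2, 1) would disable state2 on cpu0, subject to the governor behavior noted above.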
diff --git a/Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt b/Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt
new file mode 100644 (file)
index 0000000..4416ccc
--- /dev/null
@@ -0,0 +1,55 @@
+Generic CPU0 cpufreq driver
+
+It is a generic cpufreq driver for CPU0 frequency management.  It
+supports both uniprocessor (UP) and symmetric multiprocessor (SMP)
+systems which share clock and voltage across all CPUs.
+
+Both required and optional properties listed below must be defined
+under node /cpus/cpu@0.
+
+Required properties:
+- operating-points: Refer to Documentation/devicetree/bindings/power/opp.txt
+  for details
+
+Optional properties:
+- clock-latency: Specify the maximum possible transition latency for the
+  clock, in nanoseconds.
+- voltage-tolerance: Specify the CPU voltage tolerance in percentage.
+
+Examples:
+
+cpus {
+       #address-cells = <1>;
+       #size-cells = <0>;
+
+       cpu@0 {
+               compatible = "arm,cortex-a9";
+               reg = <0>;
+               next-level-cache = <&L2>;
+               operating-points = <
+                       /* kHz    uV */
+                       792000  1100000
+                       396000  950000
+                       198000  850000
+               >;
+               transition-latency = <61036>; /* two CLK32 periods */
+       };
+
+       cpu@1 {
+               compatible = "arm,cortex-a9";
+               reg = <1>;
+               next-level-cache = <&L2>;
+       };
+
+       cpu@2 {
+               compatible = "arm,cortex-a9";
+               reg = <2>;
+               next-level-cache = <&L2>;
+       };
+
+       cpu@3 {
+               compatible = "arm,cortex-a9";
+               reg = <3>;
+               next-level-cache = <&L2>;
+       };
+};
diff --git a/Documentation/devicetree/bindings/power/opp.txt b/Documentation/devicetree/bindings/power/opp.txt
new file mode 100644 (file)
index 0000000..74499e5
--- /dev/null
@@ -0,0 +1,25 @@
+* Generic OPP Interface
+
+SoCs have a standard set of tuples consisting of frequency and
+voltage pairs that the device will support per voltage domain. These
+are called Operating Performance Points or OPPs.
+
+Properties:
+- operating-points: An array of 2-tuple items; each item consists of a
+  frequency and a voltage, like <freq-kHz vol-uV>.
+       freq: clock frequency in kHz
+       vol: voltage in microvolts
+
+Examples:
+
+cpu@0 {
+       compatible = "arm,cortex-a9";
+       reg = <0>;
+       next-level-cache = <&L2>;
+       operating-points = <
+               /* kHz    uV */
+               792000  1100000
+               396000  950000
+               198000  850000
+       >;
+};
index afaff312bf415acb59449aeb9bee4c848c197ff2..f4d8c7105fcdd2e07af12d61154d9fb07c772f15 100644 (file)
@@ -579,7 +579,7 @@ Why:        KVM tracepoints provide mostly equivalent information in a much more
 ----------------------------
 
 What:  at91-mci driver ("CONFIG_MMC_AT91")
-When:  3.7
+When:  3.8
 Why:   There are two mci drivers: at91-mci and atmel-mci. The PDC support
        was added to atmel-mci as a first step to support more chips.
        Then at91-mci was kept only for old IP versions (on at91rm9200 and
index 615142da4ef64c6ec8ae87893425046a1564e786..157416e78cc4168859c89c88b13bcfeb71c4848f 100644 (file)
@@ -21,6 +21,7 @@ Supported adapters:
   * Intel DH89xxCC (PCH)
   * Intel Panther Point (PCH)
   * Intel Lynx Point (PCH)
+  * Intel Lynx Point-LP (PCH)
    Datasheets: Publicly available at the Intel website
 
 On Intel Patsburg and later chipsets, both the normal host SMBus controller
index fdc0119963e70f12c4f9e1eae3a613447e7d8781..53cc13c82cb1d74646c310e78d01a3509841a669 100644 (file)
@@ -3388,7 +3388,7 @@ M:        "Wolfram Sang (embedded platforms)" <w.sang@pengutronix.de>
 L:     linux-i2c@vger.kernel.org
 W:     http://i2c.wiki.kernel.org/
 T:     quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-i2c/
-T:     git git://git.fluff.org/bjdooks/linux.git
+T:     git git://git.pengutronix.de/git/wsa/linux.git
 S:     Maintained
 F:     Documentation/i2c/
 F:     drivers/i2c/
index 371ce8899f5c758633ed25ffdacef01b45fa6e99..ae6928cc59d36d1450b6c9f6e5d87067b662a8db 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 6
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc6
 NAME = Saber-toothed Squirrel
 
 # *DOCUMENTATION*
index c5f9ae5dbd1ad04da804d68e475d3b4b230832f6..2f88d8d9770116014f7ff55e80d98c65fbb191ed 100644 (file)
@@ -6,7 +6,7 @@ config ARM
        select HAVE_DMA_API_DEBUG
        select HAVE_IDE if PCI || ISA || PCMCIA
        select HAVE_DMA_ATTRS
-       select HAVE_DMA_CONTIGUOUS if (CPU_V6 || CPU_V6K || CPU_V7)
+       select HAVE_DMA_CONTIGUOUS if MMU
        select HAVE_MEMBLOCK
        select RTC_LIB
        select SYS_SUPPORTS_APM_EMULATION
index f15f82bf3a50f808005af479ff9c334f100c1044..e968a52e4881967a01f8aa68fc586aafe61edf38 100644 (file)
@@ -356,15 +356,15 @@ choice
                  is nothing connected to read from the DCC.
 
        config DEBUG_SEMIHOSTING
-               bool "Kernel low-level debug output via semihosting I"
+               bool "Kernel low-level debug output via semihosting I/O"
                help
                  Semihosting enables code running on an ARM target to use
                  the I/O facilities on a host debugger/emulator through a
-                 simple SVC calls. The host debugger or emulator must have
+                 simple SVC call. The host debugger or emulator must have
                  semihosting enabled for the special svc call to be trapped
                  otherwise the kernel will crash.
 
-                 This is known to work with OpenOCD, as wellas
+                 This is known to work with OpenOCD, as well as
                  ARM's Fast Models, or any other controlling environment
                  that implements semihosting.
 
index 30eae87ead6d4b245bc6707040645f5182464e90..a051dfbdd7db07fb12db913d85050a5bb0ea7167 100644 (file)
@@ -284,10 +284,10 @@ zImage Image xipImage bootpImage uImage: vmlinux
 zinstall uinstall install: vmlinux
        $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@
 
-%.dtb:
+%.dtb: scripts
        $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
 
-dtbs:
+dtbs: scripts
        $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
 
 # We use MRPROPER_FILES and CLEAN_FILES now
index b8c64b80bafc848032dfff366a9ac4428a7f82b5..81769c1341fa7d071c105d8dcb6b2fe0d9b8110c 100644 (file)
@@ -659,10 +659,14 @@ __armv7_mmu_cache_on:
 #ifdef CONFIG_CPU_ENDIAN_BE8
                orr     r0, r0, #1 << 25        @ big-endian page tables
 #endif
+               mrcne   p15, 0, r6, c2, c0, 2   @ read ttb control reg
                orrne   r0, r0, #1              @ MMU enabled
                movne   r1, #0xfffffffd         @ domain 0 = client
+               bic     r6, r6, #1 << 31        @ 32-bit translation system
+               bic     r6, r6, #3 << 0         @ use only ttbr0
                mcrne   p15, 0, r3, c2, c0, 0   @ load page table pointer
                mcrne   p15, 0, r1, c3, c0, 0   @ load domain access control
+               mcrne   p15, 0, r6, c2, c0, 2   @ load ttb control
 #endif
                mcr     p15, 0, r0, c7, c5, 4   @ ISB
                mcr     p15, 0, r0, c1, c0, 0   @ load control register
index 7829a4d0cb22e2b4c010def2ca689e46c7682044..96514c134e54bafd1540c5f55524411b09f1e7a8 100644 (file)
@@ -15,7 +15,7 @@
        compatible = "atmel,at91sam9g25ek", "atmel,at91sam9x5ek", "atmel,at91sam9x5", "atmel,at91sam9";
 
        chosen {
-               bootargs = "128M console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=ubifs ubi.mtd=1 root=ubi0:rootfs";
+               bootargs = "console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=ubifs ubi.mtd=1 root=ubi0:rootfs";
        };
 
        ahb {
index 7d8718468e0dff1ec28b63014f6c56ec62650d10..90610c7030f7aafd4ccc9663983bc367e89324dd 100644 (file)
@@ -33,7 +33,7 @@ CONFIG_AEABI=y
 CONFIG_FORCE_MAX_ZONEORDER=13
 CONFIG_ZBOOT_ROM_TEXT=0x0
 CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="console=tty0 console=ttySC1,115200 earlyprintk=sh-sci.1,115200 ignore_loglevel root=/dev/nfs ip=dhcp nfsroot=,rsize=4096,wsize=4096"
+CONFIG_CMDLINE="console=tty0 console=ttySC1,115200 earlyprintk=sh-sci.1,115200 ignore_loglevel root=/dev/nfs ip=dhcp nfsroot=,rsize=4096,wsize=4096 rw"
 CONFIG_CMDLINE_FORCE=y
 CONFIG_KEXEC=y
 CONFIG_VFP=y
index 03fb93621d0d6046b7b416c9c4328437380fd843..5c8b3bf4d8252f1013af6fea25848ce9c41dcfeb 100644 (file)
        .size \name , . - \name
        .endm
 
+       .macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
+#ifndef CONFIG_CPU_USE_DOMAINS
+       adds    \tmp, \addr, #\size - 1
+       sbcccs  \tmp, \tmp, \limit
+       bcs     \bad
+#endif
+       .endm
+
 #endif /* __ASM_ASSEMBLER_H__ */
index 2ae842df455180d3bcd445d339ecd332d3b36c4e..5c44dcb0987bd31418298f7932baaa5a82320477 100644 (file)
@@ -202,6 +202,13 @@ static inline void dma_free_writecombine(struct device *dev, size_t size,
        return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
 }
 
+/*
+ * This can be called during early boot to increase the size of the atomic
+ * coherent DMA pool above the default value of 256KiB. It must be called
+ * before postcore_initcall.
+ */
+extern void __init init_dma_coherent_pool_size(unsigned long size);
+
 /*
  * This can be called during boot to increase the size of the consistent
  * DMA region above it's default value of 2MB. It must be called before the
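A minimal sketch of how platform code might use the hook declared above, assuming a hypothetical board and a 1 MiB pool size; the only real constraint is the one stated in the comment, i.e. the call must happen before postcore_initcall.

    /*
     * Illustrative platform-code fragment (the board name and the 1 MiB
     * size are assumptions): enlarge the atomic coherent pool before the
     * postcore initcall that actually allocates it, e.g. from the
     * machine_desc .init_early callback.
     */
    #include <linux/dma-mapping.h>
    #include <linux/init.h>

    void __init example_board_init_early(void)
    {
            /* Use 1 MiB instead of the default 256 KiB. */
            init_dma_coherent_pool_size(1024 * 1024);
    }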
index e965f1b560f11e3a504814183c98f8f1b11bbf25..5f6ddcc56452998f40b1c16d7e20a0ff1ec010bd 100644 (file)
@@ -187,6 +187,7 @@ static inline unsigned long __phys_to_virt(unsigned long x)
 #define __phys_to_virt(x)      ((x) - PHYS_OFFSET + PAGE_OFFSET)
 #endif
 #endif
+#endif /* __ASSEMBLY__ */
 
 #ifndef PHYS_OFFSET
 #ifdef PLAT_PHYS_OFFSET
@@ -196,6 +197,8 @@ static inline unsigned long __phys_to_virt(unsigned long x)
 #endif
 #endif
 
+#ifndef __ASSEMBLY__
+
 /*
  * PFNs are used to describe any physical page; this means
  * PFN 0 == physical address 0.
index 314d4664eae7d9976a5fe656f74918cb8d15ab8b..99a19512ee26e2e5d99135d21f10d8b99e606226 100644 (file)
@@ -199,6 +199,9 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 {
        pgtable_page_dtor(pte);
 
+#ifdef CONFIG_ARM_LPAE
+       tlb_add_flush(tlb, addr);
+#else
        /*
         * With the classic ARM MMU, a pte page has two corresponding pmd
         * entries, each covering 1MB.
@@ -206,6 +209,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
        addr &= PMD_MASK;
        tlb_add_flush(tlb, addr + SZ_1M - PAGE_SIZE);
        tlb_add_flush(tlb, addr + SZ_1M);
+#endif
 
        tlb_remove_page(tlb, pte);
 }
index 479a6352e0b5075911e91a4e0b60b7e9443fd4fd..77bd79f2ffdbd0344d096ca7fb2db809fb52d387 100644 (file)
@@ -101,28 +101,39 @@ extern int __get_user_1(void *);
 extern int __get_user_2(void *);
 extern int __get_user_4(void *);
 
-#define __get_user_x(__r2,__p,__e,__s,__i...)                          \
+#define __GUP_CLOBBER_1        "lr", "cc"
+#ifdef CONFIG_CPU_USE_DOMAINS
+#define __GUP_CLOBBER_2        "ip", "lr", "cc"
+#else
+#define __GUP_CLOBBER_2 "lr", "cc"
+#endif
+#define __GUP_CLOBBER_4        "lr", "cc"
+
+#define __get_user_x(__r2,__p,__e,__l,__s)                             \
           __asm__ __volatile__ (                                       \
                __asmeq("%0", "r0") __asmeq("%1", "r2")                 \
+               __asmeq("%3", "r1")                                     \
                "bl     __get_user_" #__s                               \
                : "=&r" (__e), "=r" (__r2)                              \
-               : "0" (__p)                                             \
-               : __i, "cc")
+               : "0" (__p), "r" (__l)                                  \
+               : __GUP_CLOBBER_##__s)
 
-#define get_user(x,p)                                                  \
+#define __get_user_check(x,p)                                                  \
        ({                                                              \
+               unsigned long __limit = current_thread_info()->addr_limit - 1; \
                register const typeof(*(p)) __user *__p asm("r0") = (p);\
                register unsigned long __r2 asm("r2");                  \
+               register unsigned long __l asm("r1") = __limit;         \
                register int __e asm("r0");                             \
                switch (sizeof(*(__p))) {                               \
                case 1:                                                 \
-                       __get_user_x(__r2, __p, __e, 1, "lr");          \
-                       break;                                          \
+                       __get_user_x(__r2, __p, __e, __l, 1);           \
+                       break;                                          \
                case 2:                                                 \
-                       __get_user_x(__r2, __p, __e, 2, "r3", "lr");    \
+                       __get_user_x(__r2, __p, __e, __l, 2);           \
                        break;                                          \
                case 4:                                                 \
-                       __get_user_x(__r2, __p, __e, 4, "lr");          \
+                       __get_user_x(__r2, __p, __e, __l, 4);           \
                        break;                                          \
                default: __e = __get_user_bad(); break;                 \
                }                                                       \
@@ -130,42 +141,57 @@ extern int __get_user_4(void *);
                __e;                                                    \
        })
 
+#define get_user(x,p)                                                  \
+       ({                                                              \
+               might_fault();                                          \
+               __get_user_check(x,p);                                  \
+        })
+
 extern int __put_user_1(void *, unsigned int);
 extern int __put_user_2(void *, unsigned int);
 extern int __put_user_4(void *, unsigned int);
 extern int __put_user_8(void *, unsigned long long);
 
-#define __put_user_x(__r2,__p,__e,__s)                                 \
+#define __put_user_x(__r2,__p,__e,__l,__s)                             \
           __asm__ __volatile__ (                                       \
                __asmeq("%0", "r0") __asmeq("%2", "r2")                 \
+               __asmeq("%3", "r1")                                     \
                "bl     __put_user_" #__s                               \
                : "=&r" (__e)                                           \
-               : "0" (__p), "r" (__r2)                                 \
+               : "0" (__p), "r" (__r2), "r" (__l)                      \
                : "ip", "lr", "cc")
 
-#define put_user(x,p)                                                  \
+#define __put_user_check(x,p)                                                  \
        ({                                                              \
+               unsigned long __limit = current_thread_info()->addr_limit - 1; \
                register const typeof(*(p)) __r2 asm("r2") = (x);       \
                register const typeof(*(p)) __user *__p asm("r0") = (p);\
+               register unsigned long __l asm("r1") = __limit;         \
                register int __e asm("r0");                             \
                switch (sizeof(*(__p))) {                               \
                case 1:                                                 \
-                       __put_user_x(__r2, __p, __e, 1);                \
+                       __put_user_x(__r2, __p, __e, __l, 1);           \
                        break;                                          \
                case 2:                                                 \
-                       __put_user_x(__r2, __p, __e, 2);                \
+                       __put_user_x(__r2, __p, __e, __l, 2);           \
                        break;                                          \
                case 4:                                                 \
-                       __put_user_x(__r2, __p, __e, 4);                \
+                       __put_user_x(__r2, __p, __e, __l, 4);           \
                        break;                                          \
                case 8:                                                 \
-                       __put_user_x(__r2, __p, __e, 8);                \
+                       __put_user_x(__r2, __p, __e, __l, 8);           \
                        break;                                          \
                default: __e = __put_user_bad(); break;                 \
                }                                                       \
                __e;                                                    \
        })
 
+#define put_user(x,p)                                                  \
+       ({                                                              \
+               might_fault();                                          \
+               __put_user_check(x,p);                                  \
+        })
+
 #else /* CONFIG_MMU */
 
 /*
@@ -219,6 +245,7 @@ do {                                                                        \
        unsigned long __gu_addr = (unsigned long)(ptr);                 \
        unsigned long __gu_val;                                         \
        __chk_user_ptr(ptr);                                            \
+       might_fault();                                                  \
        switch (sizeof(*(ptr))) {                                       \
        case 1: __get_user_asm_byte(__gu_val,__gu_addr,err);    break;  \
        case 2: __get_user_asm_half(__gu_val,__gu_addr,err);    break;  \
@@ -300,6 +327,7 @@ do {                                                                        \
        unsigned long __pu_addr = (unsigned long)(ptr);                 \
        __typeof__(*(ptr)) __pu_val = (x);                              \
        __chk_user_ptr(ptr);                                            \
+       might_fault();                                                  \
        switch (sizeof(*(ptr))) {                                       \
        case 1: __put_user_asm_byte(__pu_val,__pu_addr,err);    break;  \
        case 2: __put_user_asm_half(__pu_val,__pu_addr,err);    break;  \
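The caller-visible interface of these macros is unchanged; as a reminder of what they are used for, here is a minimal driver-side sketch with a hypothetical ioctl command and device state.

    /*
     * Minimal sketch of the caller-side API hardened above; the ioctl
     * commands and the threshold variable are hypothetical.
     */
    #include <linux/fs.h>
    #include <linux/uaccess.h>

    static int example_threshold;

    static long example_ioctl(struct file *file, unsigned int cmd,
                              unsigned long arg)
    {
            int __user *argp = (int __user *)arg;
            int val;

            switch (cmd) {
            case 0x1234:    /* hypothetical "set threshold" command */
                    /*
                     * get_user() may fault and now also checks the address
                     * against the thread's addr_limit before the access.
                     */
                    if (get_user(val, argp))
                            return -EFAULT;
                    example_threshold = val;
                    return 0;
            case 0x1235:    /* hypothetical "get threshold" command */
                    if (put_user(example_threshold, argp))
                            return -EFAULT;
                    return 0;
            default:
                    return -ENOTTY;
            }
    }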
index ba386bd94107642d9819a7d8bafb7ba04e92b83f..281bf3301241fba2a1ce1baafeaf3020b7cd57ff 100644 (file)
@@ -159,6 +159,12 @@ static int debug_arch_supported(void)
                arch >= ARM_DEBUG_ARCH_V7_1;
 }
 
+/* Can we determine the watchpoint access type from the fsr? */
+static int debug_exception_updates_fsr(void)
+{
+       return 0;
+}
+
 /* Determine number of WRP registers available. */
 static int get_num_wrp_resources(void)
 {
@@ -604,13 +610,14 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
                /* Aligned */
                break;
        case 1:
-               /* Allow single byte watchpoint. */
-               if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
-                       break;
        case 2:
                /* Allow halfword watchpoints and breakpoints. */
                if (info->ctrl.len == ARM_BREAKPOINT_LEN_2)
                        break;
+       case 3:
+               /* Allow single byte watchpoint. */
+               if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
+                       break;
        default:
                ret = -EINVAL;
                goto out;
@@ -619,18 +626,35 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
        info->address &= ~alignment_mask;
        info->ctrl.len <<= offset;
 
-       /*
-        * Currently we rely on an overflow handler to take
-        * care of single-stepping the breakpoint when it fires.
-        * In the case of userspace breakpoints on a core with V7 debug,
-        * we can use the mismatch feature as a poor-man's hardware
-        * single-step, but this only works for per-task breakpoints.
-        */
-       if (!bp->overflow_handler && (arch_check_bp_in_kernelspace(bp) ||
-           !core_has_mismatch_brps() || !bp->hw.bp_target)) {
-               pr_warning("overflow handler required but none found\n");
-               ret = -EINVAL;
+       if (!bp->overflow_handler) {
+               /*
+                * Mismatch breakpoints are required for single-stepping
+                * breakpoints.
+                */
+               if (!core_has_mismatch_brps())
+                       return -EINVAL;
+
+               /* We don't allow mismatch breakpoints in kernel space. */
+               if (arch_check_bp_in_kernelspace(bp))
+                       return -EPERM;
+
+               /*
+                * Per-cpu breakpoints are not supported by our stepping
+                * mechanism.
+                */
+               if (!bp->hw.bp_target)
+                       return -EINVAL;
+
+               /*
+                * We only support specific access types if the fsr
+                * reports them.
+                */
+               if (!debug_exception_updates_fsr() &&
+                   (info->ctrl.type == ARM_BREAKPOINT_LOAD ||
+                    info->ctrl.type == ARM_BREAKPOINT_STORE))
+                       return -EINVAL;
        }
+
 out:
        return ret;
 }
@@ -706,10 +730,12 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
                                goto unlock;
 
                        /* Check that the access type matches. */
-                       access = (fsr & ARM_FSR_ACCESS_MASK) ? HW_BREAKPOINT_W :
-                                HW_BREAKPOINT_R;
-                       if (!(access & hw_breakpoint_type(wp)))
-                               goto unlock;
+                       if (debug_exception_updates_fsr()) {
+                               access = (fsr & ARM_FSR_ACCESS_MASK) ?
+                                         HW_BREAKPOINT_W : HW_BREAKPOINT_R;
+                               if (!(access & hw_breakpoint_type(wp)))
+                                       goto unlock;
+                       }
 
                        /* We have a winner. */
                        info->trigger = addr;
index ebd8ad274d76bb82488240e9543d7a1d99b5c674..8e03567c958354b030da70a27a9db1d01e335757 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/percpu.h>
 #include <linux/clockchips.h>
 #include <linux/completion.h>
+#include <linux/cpufreq.h>
 
 #include <linux/atomic.h>
 #include <asm/cacheflush.h>
@@ -584,3 +585,56 @@ int setup_profiling_timer(unsigned int multiplier)
 {
        return -EINVAL;
 }
+
+#ifdef CONFIG_CPU_FREQ
+
+static DEFINE_PER_CPU(unsigned long, l_p_j_ref);
+static DEFINE_PER_CPU(unsigned long, l_p_j_ref_freq);
+static unsigned long global_l_p_j_ref;
+static unsigned long global_l_p_j_ref_freq;
+
+static int cpufreq_callback(struct notifier_block *nb,
+                                       unsigned long val, void *data)
+{
+       struct cpufreq_freqs *freq = data;
+       int cpu = freq->cpu;
+
+       if (freq->flags & CPUFREQ_CONST_LOOPS)
+               return NOTIFY_OK;
+
+       if (!per_cpu(l_p_j_ref, cpu)) {
+               per_cpu(l_p_j_ref, cpu) =
+                       per_cpu(cpu_data, cpu).loops_per_jiffy;
+               per_cpu(l_p_j_ref_freq, cpu) = freq->old;
+               if (!global_l_p_j_ref) {
+                       global_l_p_j_ref = loops_per_jiffy;
+                       global_l_p_j_ref_freq = freq->old;
+               }
+       }
+
+       if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
+           (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
+           (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
+               loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
+                                               global_l_p_j_ref_freq,
+                                               freq->new);
+               per_cpu(cpu_data, cpu).loops_per_jiffy =
+                       cpufreq_scale(per_cpu(l_p_j_ref, cpu),
+                                       per_cpu(l_p_j_ref_freq, cpu),
+                                       freq->new);
+       }
+       return NOTIFY_OK;
+}
+
+static struct notifier_block cpufreq_notifier = {
+       .notifier_call  = cpufreq_callback,
+};
+
+static int __init register_cpufreq_notifier(void)
+{
+       return cpufreq_register_notifier(&cpufreq_notifier,
+                                               CPUFREQ_TRANSITION_NOTIFIER);
+}
+core_initcall(register_cpufreq_notifier);
+
+#endif
index f7945218b8c63a722cf08badc229345d7083b052..b0179b89a04ce26062184aaf23f86c521fb3009c 100644 (file)
@@ -420,20 +420,23 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
 #endif
                        instr = *(u32 *) pc;
        } else if (thumb_mode(regs)) {
-               get_user(instr, (u16 __user *)pc);
+               if (get_user(instr, (u16 __user *)pc))
+                       goto die_sig;
                if (is_wide_instruction(instr)) {
                        unsigned int instr2;
-                       get_user(instr2, (u16 __user *)pc+1);
+                       if (get_user(instr2, (u16 __user *)pc+1))
+                               goto die_sig;
                        instr <<= 16;
                        instr |= instr2;
                }
-       } else {
-               get_user(instr, (u32 __user *)pc);
+       } else if (get_user(instr, (u32 __user *)pc)) {
+               goto die_sig;
        }
 
        if (call_undef_hook(regs, instr) == 0)
                return;
 
+die_sig:
 #ifdef CONFIG_DEBUG_USER
        if (user_debug & UDBG_UNDEFINED) {
                printk(KERN_INFO "%s (%d): undefined instruction: pc=%p\n",
index d6dacc69254e47ddca3f399cc6f37eef12b073d8..395d5fbb8fa20c01c8b71d37dce42df700820c91 100644 (file)
@@ -59,6 +59,7 @@ void __init init_current_timer_delay(unsigned long freq)
 {
        pr_info("Switching to timer-based delay loop\n");
        lpj_fine                        = freq / HZ;
+       loops_per_jiffy                 = lpj_fine;
        arm_delay_ops.delay             = __timer_delay;
        arm_delay_ops.const_udelay      = __timer_const_udelay;
        arm_delay_ops.udelay            = __timer_udelay;
index 11093a7c3e32289e95a8c100cc01ef2bbb8d7101..9b06bb41fca659b9bbfc2f996e703ce2b8c315aa 100644 (file)
@@ -16,8 +16,9 @@
  * __get_user_X
  *
  * Inputs:     r0 contains the address
+ *             r1 contains the address limit, which must be preserved
  * Outputs:    r0 is the error code
- *             r2, r3 contains the zero-extended value
+ *             r2 contains the zero-extended value
  *             lr corrupted
  *
  * No other registers must be altered.  (see <asm/uaccess.h>
  * Note also that it is intended that __get_user_bad is not global.
  */
 #include <linux/linkage.h>
+#include <asm/assembler.h>
 #include <asm/errno.h>
 #include <asm/domain.h>
 
 ENTRY(__get_user_1)
+       check_uaccess r0, 1, r1, r2, __get_user_bad
 1: TUSER(ldrb) r2, [r0]
        mov     r0, #0
        mov     pc, lr
 ENDPROC(__get_user_1)
 
 ENTRY(__get_user_2)
-#ifdef CONFIG_THUMB2_KERNEL
-2: TUSER(ldrb) r2, [r0]
-3: TUSER(ldrb) r3, [r0, #1]
+       check_uaccess r0, 2, r1, r2, __get_user_bad
+#ifdef CONFIG_CPU_USE_DOMAINS
+rb     .req    ip
+2:     ldrbt   r2, [r0], #1
+3:     ldrbt   rb, [r0], #0
 #else
-2: TUSER(ldrb) r2, [r0], #1
-3: TUSER(ldrb) r3, [r0]
+rb     .req    r0
+2:     ldrb    r2, [r0]
+3:     ldrb    rb, [r0, #1]
 #endif
 #ifndef __ARMEB__
-       orr     r2, r2, r3, lsl #8
+       orr     r2, r2, rb, lsl #8
 #else
-       orr     r2, r3, r2, lsl #8
+       orr     r2, rb, r2, lsl #8
 #endif
        mov     r0, #0
        mov     pc, lr
 ENDPROC(__get_user_2)
 
 ENTRY(__get_user_4)
+       check_uaccess r0, 4, r1, r2, __get_user_bad
 4: TUSER(ldr)  r2, [r0]
        mov     r0, #0
        mov     pc, lr
index 7db25990c589f3d98554d9aee47cf7b5c3c486fd..3d73dcb959b0da83bc8affe3a781b7fcbdb17752 100644 (file)
@@ -16,6 +16,7 @@
  * __put_user_X
  *
  * Inputs:     r0 contains the address
+ *             r1 contains the address limit, which must be preserved
  *             r2, r3 contains the value
  * Outputs:    r0 is the error code
  *             lr corrupted
  * Note also that it is intended that __put_user_bad is not global.
  */
 #include <linux/linkage.h>
+#include <asm/assembler.h>
 #include <asm/errno.h>
 #include <asm/domain.h>
 
 ENTRY(__put_user_1)
+       check_uaccess r0, 1, r1, ip, __put_user_bad
 1: TUSER(strb) r2, [r0]
        mov     r0, #0
        mov     pc, lr
 ENDPROC(__put_user_1)
 
 ENTRY(__put_user_2)
+       check_uaccess r0, 2, r1, ip, __put_user_bad
        mov     ip, r2, lsr #8
 #ifdef CONFIG_THUMB2_KERNEL
 #ifndef __ARMEB__
@@ -60,12 +64,14 @@ ENTRY(__put_user_2)
 ENDPROC(__put_user_2)
 
 ENTRY(__put_user_4)
+       check_uaccess r0, 4, r1, ip, __put_user_bad
 4: TUSER(str)  r2, [r0]
        mov     r0, #0
        mov     pc, lr
 ENDPROC(__put_user_4)
 
 ENTRY(__put_user_8)
+       check_uaccess r0, 8, r1, ip, __put_user_bad
 #ifdef CONFIG_THUMB2_KERNEL
 5: TUSER(str)  r2, [r0]
 6: TUSER(str)  r3, [r0, #4]
index 104ca40d8d18908cb463e7ae1cd56790c926d971..aaa443b48c91f121e2f698792fb94bb4d6e2afa6 100644 (file)
@@ -197,7 +197,7 @@ void __init at91rm9200_timer_init(void)
        at91_st_read(AT91_ST_SR);
 
        /* Make IRQs happen for the system timer */
-       setup_irq(AT91_ID_SYS, &at91rm9200_timer_irq);
+       setup_irq(NR_IRQS_LEGACY + AT91_ID_SYS, &at91rm9200_timer_irq);
 
        /* The 32KiHz "Slow Clock" (tick every 30517.58 nanoseconds) is used
         * directly for the clocksource and all clockevents, after adjusting
index 7b9c2ba396edb854cfd78ddef676250a751e60bc..bce572a530ef2c59946e2f574229d905bbfe041b 100644 (file)
@@ -726,6 +726,8 @@ static struct resource rtt_resources[] = {
                .flags  = IORESOURCE_MEM,
        }, {
                .flags  = IORESOURCE_MEM,
+       }, {
+               .flags  = IORESOURCE_IRQ,
        },
 };
 
@@ -744,10 +746,12 @@ static void __init at91_add_device_rtt_rtc(void)
         * The second resource is needed:
         * GPBR will serve as the storage for RTC time offset
         */
-       at91sam9260_rtt_device.num_resources = 2;
+       at91sam9260_rtt_device.num_resources = 3;
        rtt_resources[1].start = AT91SAM9260_BASE_GPBR +
                                 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR;
        rtt_resources[1].end = rtt_resources[1].start + 3;
+       rtt_resources[2].start = NR_IRQS_LEGACY + AT91_ID_SYS;
+       rtt_resources[2].end = NR_IRQS_LEGACY + AT91_ID_SYS;
 }
 #else
 static void __init at91_add_device_rtt_rtc(void)
index 8df5c1bdff92f1d1194fa9d64da61e2aa9bc2598..bc2590d712d06b214a81171a90c45c9f00ac9252 100644 (file)
@@ -609,6 +609,8 @@ static struct resource rtt_resources[] = {
                .flags  = IORESOURCE_MEM,
        }, {
                .flags  = IORESOURCE_MEM,
+       }, {
+               .flags  = IORESOURCE_IRQ,
        }
 };
 
@@ -626,10 +628,12 @@ static void __init at91_add_device_rtt_rtc(void)
         * The second resource is needed:
         * GPBR will serve as the storage for RTC time offset
         */
-       at91sam9261_rtt_device.num_resources = 2;
+       at91sam9261_rtt_device.num_resources = 3;
        rtt_resources[1].start = AT91SAM9261_BASE_GPBR +
                                 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR;
        rtt_resources[1].end = rtt_resources[1].start + 3;
+       rtt_resources[2].start = NR_IRQS_LEGACY + AT91_ID_SYS;
+       rtt_resources[2].end = NR_IRQS_LEGACY + AT91_ID_SYS;
 }
 #else
 static void __init at91_add_device_rtt_rtc(void)
index eb6bbf86fb9f597cc7424a52b459bb04ecff2326..9b6ca734f1a96e6b903f177185fc9d23af2f28e5 100644 (file)
@@ -990,6 +990,8 @@ static struct resource rtt0_resources[] = {
                .flags  = IORESOURCE_MEM,
        }, {
                .flags  = IORESOURCE_MEM,
+       }, {
+               .flags  = IORESOURCE_IRQ,
        }
 };
 
@@ -1006,6 +1008,8 @@ static struct resource rtt1_resources[] = {
                .flags  = IORESOURCE_MEM,
        }, {
                .flags  = IORESOURCE_MEM,
+       }, {
+               .flags  = IORESOURCE_IRQ,
        }
 };
 
@@ -1027,14 +1031,14 @@ static void __init at91_add_device_rtt_rtc(void)
                 * The second resource is needed only for the chosen RTT:
                 * GPBR will serve as the storage for RTC time offset
                 */
-               at91sam9263_rtt0_device.num_resources = 2;
+               at91sam9263_rtt0_device.num_resources = 3;
                at91sam9263_rtt1_device.num_resources = 1;
                pdev = &at91sam9263_rtt0_device;
                r = rtt0_resources;
                break;
        case 1:
                at91sam9263_rtt0_device.num_resources = 1;
-               at91sam9263_rtt1_device.num_resources = 2;
+               at91sam9263_rtt1_device.num_resources = 3;
                pdev = &at91sam9263_rtt1_device;
                r = rtt1_resources;
                break;
@@ -1047,6 +1051,8 @@ static void __init at91_add_device_rtt_rtc(void)
        pdev->name = "rtc-at91sam9";
        r[1].start = AT91SAM9263_BASE_GPBR + 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR;
        r[1].end = r[1].start + 3;
+       r[2].start = NR_IRQS_LEGACY + AT91_ID_SYS;
+       r[2].end = NR_IRQS_LEGACY + AT91_ID_SYS;
 }
 #else
 static void __init at91_add_device_rtt_rtc(void)
index 06073996a38241d50fb1595c52017a50567d7e44..1b47319ca00b1a72e7c8600e4469b5661529f54a 100644 (file)
@@ -1293,6 +1293,8 @@ static struct resource rtt_resources[] = {
                .flags  = IORESOURCE_MEM,
        }, {
                .flags  = IORESOURCE_MEM,
+       }, {
+               .flags  = IORESOURCE_IRQ,
        }
 };
 
@@ -1310,10 +1312,12 @@ static void __init at91_add_device_rtt_rtc(void)
         * The second resource is needed:
         * GPBR will serve as the storage for RTC time offset
         */
-       at91sam9g45_rtt_device.num_resources = 2;
+       at91sam9g45_rtt_device.num_resources = 3;
        rtt_resources[1].start = AT91SAM9G45_BASE_GPBR +
                                 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR;
        rtt_resources[1].end = rtt_resources[1].start + 3;
+       rtt_resources[2].start = NR_IRQS_LEGACY + AT91_ID_SYS;
+       rtt_resources[2].end = NR_IRQS_LEGACY + AT91_ID_SYS;
 }
 #else
 static void __init at91_add_device_rtt_rtc(void)
index f09fff932172ffec238c10714331d7c8970a53e1..b3d365dadef59740154b808fb05c07806f213dab 100644 (file)
@@ -688,6 +688,8 @@ static struct resource rtt_resources[] = {
                .flags  = IORESOURCE_MEM,
        }, {
                .flags  = IORESOURCE_MEM,
+       }, {
+               .flags  = IORESOURCE_IRQ,
        }
 };
 
@@ -705,10 +707,12 @@ static void __init at91_add_device_rtt_rtc(void)
         * The second resource is needed:
         * GPBR will serve as the storage for RTC time offset
         */
-       at91sam9rl_rtt_device.num_resources = 2;
+       at91sam9rl_rtt_device.num_resources = 3;
        rtt_resources[1].start = AT91SAM9RL_BASE_GPBR +
                                 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR;
        rtt_resources[1].end = rtt_resources[1].start + 3;
+       rtt_resources[2].start = NR_IRQS_LEGACY + AT91_ID_SYS;
+       rtt_resources[2].end = NR_IRQS_LEGACY + AT91_ID_SYS;
 }
 #else
 static void __init at91_add_device_rtt_rtc(void)
index de2ec6b8fea7693555c2b25367b4efb1e06cae25..188c82971ebd069ac890f90726660e8181e6848b 100644 (file)
@@ -63,6 +63,12 @@ EXPORT_SYMBOL_GPL(at91_pmc_base);
 
 #define cpu_has_300M_plla()    (cpu_is_at91sam9g10())
 
+#define cpu_has_240M_plla()    (cpu_is_at91sam9261() \
+                               || cpu_is_at91sam9263() \
+                               || cpu_is_at91sam9rl())
+
+#define cpu_has_210M_plla()    (cpu_is_at91sam9260())
+
 #define cpu_has_pllb()         (!(cpu_is_at91sam9rl() \
                                || cpu_is_at91sam9g45() \
                                || cpu_is_at91sam9x5() \
@@ -706,6 +712,12 @@ static int __init at91_pmc_init(unsigned long main_clock)
        } else if (cpu_has_800M_plla()) {
                if (plla.rate_hz > 800000000)
                        pll_overclock = true;
+       } else if (cpu_has_240M_plla()) {
+               if (plla.rate_hz > 240000000)
+                       pll_overclock = true;
+       } else if (cpu_has_210M_plla()) {
+               if (plla.rate_hz > 210000000)
+                       pll_overclock = true;
        } else {
                if (plla.rate_hz > 209000000)
                        pll_overclock = true;
index ca70e5fcc7ac12cbd519d9d8a1b246253fe7f12a..020852d3bdd8bd002710e79aba3e036074fefd55 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/sched.h>
 #include <asm/irq.h>
 #include <asm/mach/irq.h>
+#include <asm/system_misc.h>
 #include <mach/hardware.h>
 
 #define IRQ_SOURCE(base_addr)  (base_addr + 0x00)
index fdd8cc87c9feee388ca8a94b0fb46fa20305f33a..4431a62fff5b9d3a140cd1ec3473e4f51a05f99b 100644 (file)
@@ -222,10 +222,8 @@ int __init mx25_clocks_init(void)
        clk_register_clkdev(clk[lcdc_ipg], "ipg", "imx-fb.0");
        clk_register_clkdev(clk[lcdc_ahb], "ahb", "imx-fb.0");
        clk_register_clkdev(clk[wdt_ipg], NULL, "imx2-wdt.0");
-       clk_register_clkdev(clk[ssi1_ipg_per], "per", "imx-ssi.0");
-       clk_register_clkdev(clk[ssi1_ipg], "ipg", "imx-ssi.0");
-       clk_register_clkdev(clk[ssi2_ipg_per], "per", "imx-ssi.1");
-       clk_register_clkdev(clk[ssi2_ipg], "ipg", "imx-ssi.1");
+       clk_register_clkdev(clk[ssi1_ipg], NULL, "imx-ssi.0");
+       clk_register_clkdev(clk[ssi2_ipg], NULL, "imx-ssi.1");
        clk_register_clkdev(clk[esdhc1_ipg_per], "per", "sdhci-esdhc-imx25.0");
        clk_register_clkdev(clk[esdhc1_ipg], "ipg", "sdhci-esdhc-imx25.0");
        clk_register_clkdev(clk[esdhc1_ahb], "ahb", "sdhci-esdhc-imx25.0");
index c6422fb10bae37756693f3323f79df62e4fc932e..65fb8bcd86cb1652385ee0320679d04a427f8295 100644 (file)
@@ -230,10 +230,8 @@ int __init mx35_clocks_init()
        clk_register_clkdev(clk[ipu_gate], NULL, "mx3_sdc_fb");
        clk_register_clkdev(clk[owire_gate], NULL, "mxc_w1");
        clk_register_clkdev(clk[sdma_gate], NULL, "imx35-sdma");
-       clk_register_clkdev(clk[ipg], "ipg", "imx-ssi.0");
-       clk_register_clkdev(clk[ssi1_div_post], "per", "imx-ssi.0");
-       clk_register_clkdev(clk[ipg], "ipg", "imx-ssi.1");
-       clk_register_clkdev(clk[ssi2_div_post], "per", "imx-ssi.1");
+       clk_register_clkdev(clk[ssi1_gate], NULL, "imx-ssi.0");
+       clk_register_clkdev(clk[ssi2_gate], NULL, "imx-ssi.1");
        /* i.mx35 has the i.mx21 type uart */
        clk_register_clkdev(clk[uart1_gate], "per", "imx21-uart.0");
        clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.0");
index 3226077735b1d7104627f8f40307e84c97d4ca5b..1201191d7f1bb24c386df88f5fe4de14e172b006 100644 (file)
@@ -517,6 +517,13 @@ void __init kirkwood_wdt_init(void)
 void __init kirkwood_init_early(void)
 {
        orion_time_set_base(TIMER_VIRT_BASE);
+
+       /*
+        * Some Kirkwood devices allocate their coherent buffers from atomic
+        * context. Increase the size of the atomic coherent pool to make
+        * sure such allocations won't fail.
+        */
+       init_dma_coherent_pool_size(SZ_1M);
 }
 
 int kirkwood_tclk;
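
Platforms whose drivers allocate coherent DMA buffers from atomic context can
exhaust the default atomic pool, and the pool size can only be raised before the
pool is created, i.e. from the machine's init_early hook. A minimal sketch of the
same pattern for a hypothetical board, assuming the init_dma_coherent_pool_size()
helper used above:

    #include <linux/sizes.h>
    #include <linux/dma-mapping.h>
    #include <asm/mach/arch.h>

    static void __init myboard_init_early(void)
    {
            /* runs before the atomic coherent pool is allocated */
            init_dma_coherent_pool_size(SZ_1M);
    }

    MACHINE_START(MYBOARD, "myboard")
            .init_early     = myboard_init_early,
            /* .map_io, .init_irq, .init_machine, .timer as usual */
    MACHINE_END
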
index d933593795985749fce168849dd04b8edf917506..be90b7d0e10bee11eaa9b70013413b0d1d964637 100644 (file)
@@ -10,6 +10,7 @@
 
 #include <linux/kernel.h>
 #include <linux/init.h>
+#include <linux/sizes.h>
 #include <linux/platform_device.h>
 #include <linux/mtd/partitions.h>
 #include <linux/ata_platform.h>
index fcd4e85c4ddcce66921a83b29e45669c50704918..346fd26f3aa62bcbe29757ddaa1c98fb9929c53b 100644 (file)
@@ -232,10 +232,11 @@ config MACH_OMAP3_PANDORA
        select OMAP_PACKAGE_CBB
        select REGULATOR_FIXED_VOLTAGE if REGULATOR
 
-config MACH_OMAP3_TOUCHBOOK
+config MACH_TOUCHBOOK
        bool "OMAP3 Touch Book"
        depends on ARCH_OMAP3
        default y
+       select OMAP_PACKAGE_CBB
 
 config MACH_OMAP_3430SDP
        bool "OMAP 3430 SDP board"
index f6a24b3f9c4f7b4dd3e381eb3ff7823444d99e70..34c2c7f59f0a855b2be4ddcd5f2ad2a8995995b1 100644 (file)
@@ -255,7 +255,7 @@ obj-$(CONFIG_MACH_OMAP_3630SDP)             += board-zoom-display.o
 obj-$(CONFIG_MACH_CM_T35)              += board-cm-t35.o
 obj-$(CONFIG_MACH_CM_T3517)            += board-cm-t3517.o
 obj-$(CONFIG_MACH_IGEP0020)            += board-igep0020.o
-obj-$(CONFIG_MACH_OMAP3_TOUCHBOOK)     += board-omap3touchbook.o
+obj-$(CONFIG_MACH_TOUCHBOOK)           += board-omap3touchbook.o
 obj-$(CONFIG_MACH_OMAP_4430SDP)                += board-4430sdp.o
 obj-$(CONFIG_MACH_OMAP4_PANDA)         += board-omap4panda.o
 
index 25bbcc7ca4dce9794676001167a23f36474fbd86..ae27de8899a69207ac90aca52164bb7ccf37a582 100644 (file)
@@ -1036,13 +1036,13 @@ static struct omap_clk am33xx_clks[] = {
        CLK(NULL,       "mmu_fck",              &mmu_fck,       CK_AM33XX),
        CLK(NULL,       "smartreflex0_fck",     &smartreflex0_fck,      CK_AM33XX),
        CLK(NULL,       "smartreflex1_fck",     &smartreflex1_fck,      CK_AM33XX),
-       CLK(NULL,       "gpt1_fck",             &timer1_fck,    CK_AM33XX),
-       CLK(NULL,       "gpt2_fck",             &timer2_fck,    CK_AM33XX),
-       CLK(NULL,       "gpt3_fck",             &timer3_fck,    CK_AM33XX),
-       CLK(NULL,       "gpt4_fck",             &timer4_fck,    CK_AM33XX),
-       CLK(NULL,       "gpt5_fck",             &timer5_fck,    CK_AM33XX),
-       CLK(NULL,       "gpt6_fck",             &timer6_fck,    CK_AM33XX),
-       CLK(NULL,       "gpt7_fck",             &timer7_fck,    CK_AM33XX),
+       CLK(NULL,       "timer1_fck",           &timer1_fck,    CK_AM33XX),
+       CLK(NULL,       "timer2_fck",           &timer2_fck,    CK_AM33XX),
+       CLK(NULL,       "timer3_fck",           &timer3_fck,    CK_AM33XX),
+       CLK(NULL,       "timer4_fck",           &timer4_fck,    CK_AM33XX),
+       CLK(NULL,       "timer5_fck",           &timer5_fck,    CK_AM33XX),
+       CLK(NULL,       "timer6_fck",           &timer6_fck,    CK_AM33XX),
+       CLK(NULL,       "timer7_fck",           &timer7_fck,    CK_AM33XX),
        CLK(NULL,       "usbotg_fck",           &usbotg_fck,    CK_AM33XX),
        CLK(NULL,       "ieee5000_fck",         &ieee5000_fck,  CK_AM33XX),
        CLK(NULL,       "wdt1_fck",             &wdt1_fck,      CK_AM33XX),
index a0d68dbecfa3bb96cd52226b0f8d7965379d1252..f99e65cfb86223c77ed544d1a8fbe6a0c4ad4b51 100644 (file)
@@ -241,6 +241,52 @@ static void omap3_clkdm_deny_idle(struct clockdomain *clkdm)
                _clkdm_del_autodeps(clkdm);
 }
 
+static int omap3xxx_clkdm_clk_enable(struct clockdomain *clkdm)
+{
+       bool hwsup = false;
+
+       if (!clkdm->clktrctrl_mask)
+               return 0;
+
+       hwsup = omap2_cm_is_clkdm_in_hwsup(clkdm->pwrdm.ptr->prcm_offs,
+                               clkdm->clktrctrl_mask);
+
+       if (hwsup) {
+               /* Disable HW transitions when we are changing deps */
+               _disable_hwsup(clkdm);
+               _clkdm_add_autodeps(clkdm);
+               _enable_hwsup(clkdm);
+       } else {
+               if (clkdm->flags & CLKDM_CAN_FORCE_WAKEUP)
+                       omap3_clkdm_wakeup(clkdm);
+       }
+
+       return 0;
+}
+
+static int omap3xxx_clkdm_clk_disable(struct clockdomain *clkdm)
+{
+       bool hwsup = false;
+
+       if (!clkdm->clktrctrl_mask)
+               return 0;
+
+       hwsup = omap2_cm_is_clkdm_in_hwsup(clkdm->pwrdm.ptr->prcm_offs,
+                               clkdm->clktrctrl_mask);
+
+       if (hwsup) {
+               /* Disable HW transitions when we are changing deps */
+               _disable_hwsup(clkdm);
+               _clkdm_del_autodeps(clkdm);
+               _enable_hwsup(clkdm);
+       } else {
+               if (clkdm->flags & CLKDM_CAN_FORCE_SLEEP)
+                       omap3_clkdm_sleep(clkdm);
+       }
+
+       return 0;
+}
+
 struct clkdm_ops omap2_clkdm_operations = {
        .clkdm_add_wkdep        = omap2_clkdm_add_wkdep,
        .clkdm_del_wkdep        = omap2_clkdm_del_wkdep,
@@ -267,6 +313,6 @@ struct clkdm_ops omap3_clkdm_operations = {
        .clkdm_wakeup           = omap3_clkdm_wakeup,
        .clkdm_allow_idle       = omap3_clkdm_allow_idle,
        .clkdm_deny_idle        = omap3_clkdm_deny_idle,
-       .clkdm_clk_enable       = omap2_clkdm_clk_enable,
-       .clkdm_clk_disable      = omap2_clkdm_clk_disable,
+       .clkdm_clk_enable       = omap3xxx_clkdm_clk_enable,
+       .clkdm_clk_disable      = omap3xxx_clkdm_clk_disable,
 };
index 766338fe4d347746ee04eb3961452a7b3f8f6aa9..975f6bda0e0b7a84855a2e6c90f8051df747f6a2 100644 (file)
@@ -67,6 +67,7 @@
 #define OMAP3430_EN_IVA2_DPLL_MASK                     (0x7 << 0)
 
 /* CM_IDLEST_IVA2 */
+#define OMAP3430_ST_IVA2_SHIFT                         0
 #define OMAP3430_ST_IVA2_MASK                          (1 << 0)
 
 /* CM_IDLEST_PLL_IVA2 */
index 05fdebfaa195b0e5fc87e33217f24ce1b5c09822..330d4c6e746b703819f95ba41d733a88523b555a 100644 (file)
@@ -46,7 +46,7 @@
 static void __iomem *wakeupgen_base;
 static void __iomem *sar_base;
 static DEFINE_SPINLOCK(wakeupgen_lock);
-static unsigned int irq_target_cpu[NR_IRQS];
+static unsigned int irq_target_cpu[MAX_IRQS];
 static unsigned int irq_banks = MAX_NR_REG_BANKS;
 static unsigned int max_irqs = MAX_IRQS;
 static unsigned int omap_secure_apis;
index 6ca8e519968d0c4e82e94fb384ab84da90a892b1..37afbd173c2c27969e81f37917d70a767e8fbc2d 100644 (file)
@@ -1889,6 +1889,7 @@ static int _enable(struct omap_hwmod *oh)
                        _enable_sysc(oh);
                }
        } else {
+               _omap4_disable_module(oh);
                _disable_clocks(oh);
                pr_debug("omap_hwmod: %s: _wait_target_ready: %d\n",
                         oh->name, r);
index c9e38200216b2985cb3ef997e530891b8e3b19dd..ce7e6068768f3cebbb4bb8e32ea94e4ae6aca3a3 100644 (file)
@@ -100,9 +100,9 @@ static struct omap_hwmod omap3xxx_mpu_hwmod = {
 
 /* IVA2 (IVA2) */
 static struct omap_hwmod_rst_info omap3xxx_iva_resets[] = {
-       { .name = "logic", .rst_shift = 0 },
-       { .name = "seq0", .rst_shift = 1 },
-       { .name = "seq1", .rst_shift = 2 },
+       { .name = "logic", .rst_shift = 0, .st_shift = 8 },
+       { .name = "seq0", .rst_shift = 1, .st_shift = 9 },
+       { .name = "seq1", .rst_shift = 2, .st_shift = 10 },
 };
 
 static struct omap_hwmod omap3xxx_iva_hwmod = {
@@ -112,6 +112,15 @@ static struct omap_hwmod omap3xxx_iva_hwmod = {
        .rst_lines      = omap3xxx_iva_resets,
        .rst_lines_cnt  = ARRAY_SIZE(omap3xxx_iva_resets),
        .main_clk       = "iva2_ck",
+       .prcm = {
+               .omap2 = {
+                       .module_offs = OMAP3430_IVA2_MOD,
+                       .prcm_reg_id = 1,
+                       .module_bit = OMAP3430_CM_FCLKEN_IVA2_EN_IVA2_SHIFT,
+                       .idlest_reg_id = 1,
+                       .idlest_idle_bit = OMAP3430_ST_IVA2_SHIFT,
+               }
+       },
 };
 
 /* timer class */
index 242aee498ceb21466e33ee04035ed63147e4a615..afb60917a948b64bb3b0ed9b90924facc27fc7f6 100644 (file)
@@ -4210,7 +4210,7 @@ static struct omap_hwmod_ocp_if omap44xx_dsp__iva = {
 };
 
 /* dsp -> sl2if */
-static struct omap_hwmod_ocp_if omap44xx_dsp__sl2if = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_dsp__sl2if = {
        .master         = &omap44xx_dsp_hwmod,
        .slave          = &omap44xx_sl2if_hwmod,
        .clk            = "dpll_iva_m5x2_ck",
@@ -4828,7 +4828,7 @@ static struct omap_hwmod_ocp_if omap44xx_l3_main_2__iss = {
 };
 
 /* iva -> sl2if */
-static struct omap_hwmod_ocp_if omap44xx_iva__sl2if = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_iva__sl2if = {
        .master         = &omap44xx_iva_hwmod,
        .slave          = &omap44xx_sl2if_hwmod,
        .clk            = "dpll_iva_m5x2_ck",
@@ -5362,7 +5362,7 @@ static struct omap_hwmod_ocp_if omap44xx_l4_wkup__scrm = {
 };
 
 /* l3_main_2 -> sl2if */
-static struct omap_hwmod_ocp_if omap44xx_l3_main_2__sl2if = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_l3_main_2__sl2if = {
        .master         = &omap44xx_l3_main_2_hwmod,
        .slave          = &omap44xx_sl2if_hwmod,
        .clk            = "l3_div_ck",
@@ -6032,7 +6032,7 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
        &omap44xx_l4_abe__dmic,
        &omap44xx_l4_abe__dmic_dma,
        &omap44xx_dsp__iva,
-       &omap44xx_dsp__sl2if,
+       /* &omap44xx_dsp__sl2if, */
        &omap44xx_l4_cfg__dsp,
        &omap44xx_l3_main_2__dss,
        &omap44xx_l4_per__dss,
@@ -6068,7 +6068,7 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
        &omap44xx_l4_per__i2c4,
        &omap44xx_l3_main_2__ipu,
        &omap44xx_l3_main_2__iss,
-       &omap44xx_iva__sl2if,
+       /* &omap44xx_iva__sl2if, */
        &omap44xx_l3_main_2__iva,
        &omap44xx_l4_wkup__kbd,
        &omap44xx_l4_cfg__mailbox,
@@ -6099,7 +6099,7 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
        &omap44xx_l4_cfg__cm_core,
        &omap44xx_l4_wkup__prm,
        &omap44xx_l4_wkup__scrm,
-       &omap44xx_l3_main_2__sl2if,
+       /* &omap44xx_l3_main_2__sl2if, */
        &omap44xx_l4_abe__slimbus1,
        &omap44xx_l4_abe__slimbus1_dma,
        &omap44xx_l4_per__slimbus2,
index 2ff6d41ec6c6c004ace041b525ec1821d6389653..2ba4f57dda866df44689834370916395ab9a4c3c 100644 (file)
@@ -260,6 +260,7 @@ static u32 notrace dmtimer_read_sched_clock(void)
        return 0;
 }
 
+#ifdef CONFIG_OMAP_32K_TIMER
 /* Setup free-running counter for clocksource */
 static int __init omap2_sync32k_clocksource_init(void)
 {
@@ -299,6 +300,12 @@ static int __init omap2_sync32k_clocksource_init(void)
 
        return ret;
 }
+#else
+static inline int omap2_sync32k_clocksource_init(void)
+{
+       return -ENODEV;
+}
+#endif
 
 static void __init omap2_gptimer_clocksource_init(int gptimer_id,
                                                const char *fck_source)
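
The 32k clocksource setup is now built only when CONFIG_OMAP_32K_TIMER is set,
with a stub that returns -ENODEV so the caller quietly falls back to the GP timer
clocksource. A minimal sketch of this compile-time stub pattern, using
hypothetical names:

    #include <linux/errno.h>

    #ifdef CONFIG_MY_FEATURE
    int my_feature_init(void)
    {
            /* real initialization, only compiled when the option is enabled */
            return 0;
    }
    #else
    static inline int my_feature_init(void)
    {
            return -ENODEV; /* tells the caller to use the fallback path */
    }
    #endif
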
index 0df5ae6740c6e491895cc20adfda5096437beb74..fe2c97c179d1de40ae6805cce076b46594a22101 100644 (file)
@@ -3,7 +3,7 @@
 #
 
 # Common objects
-obj-y                          := timer.o console.o clock.o common.o
+obj-y                          := timer.o console.o clock.o
 
 # CPU objects
 obj-$(CONFIG_ARCH_SH7367)      += setup-sh7367.o clock-sh7367.o intc-sh7367.o
index f172ca85905cdf68d114d046ea987e43b0570af9..264340a60f65ca9584de004d361525d8cd303027 100644 (file)
@@ -1229,6 +1229,15 @@ static struct i2c_board_info i2c1_devices[] = {
 #define USCCR1         0xE6058144
 static void __init ap4evb_init(void)
 {
+       struct pm_domain_device domain_devices[] = {
+               { "A4LC", &lcdc1_device, },
+               { "A4LC", &lcdc_device, },
+               { "A4MP", &fsi_device, },
+               { "A3SP", &sh_mmcif_device, },
+               { "A3SP", &sdhi0_device, },
+               { "A3SP", &sdhi1_device, },
+               { "A4R", &ceu_device, },
+       };
        u32 srcr4;
        struct clk *clk;
 
@@ -1461,14 +1470,8 @@ static void __init ap4evb_init(void)
 
        platform_add_devices(ap4evb_devices, ARRAY_SIZE(ap4evb_devices));
 
-       rmobile_add_device_to_domain(&sh7372_pd_a4lc, &lcdc1_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a4lc, &lcdc_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a4mp, &fsi_device);
-
-       rmobile_add_device_to_domain(&sh7372_pd_a3sp, &sh_mmcif_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a3sp, &sdhi0_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a3sp, &sdhi1_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a4r, &ceu_device);
+       rmobile_add_devices_to_domains(domain_devices,
+                                      ARRAY_SIZE(domain_devices));
 
        hdmi_init_pm_clock();
        fsi_init_pm_clock();
@@ -1483,6 +1486,6 @@ MACHINE_START(AP4EVB, "ap4evb")
        .init_irq       = sh7372_init_irq,
        .handle_irq     = shmobile_handle_irq_intc,
        .init_machine   = ap4evb_init,
-       .init_late      = shmobile_init_late,
+       .init_late      = sh7372_pm_init_late,
        .timer          = &shmobile_timer,
 MACHINE_END
index cf10f92856dcbb905712024b1d6a00e55c77cf51..da6117c326b9e0c80147050089f1d8a5f604c28a 100644 (file)
@@ -520,13 +520,14 @@ static struct platform_device hdmi_lcdc_device = {
 };
 
 /* GPIO KEY */
-#define GPIO_KEY(c, g, d) { .code = c, .gpio = g, .desc = d, .active_low = 1 }
+#define GPIO_KEY(c, g, d, ...) \
+       { .code = c, .gpio = g, .desc = d, .active_low = 1, __VA_ARGS__ }
 
 static struct gpio_keys_button gpio_buttons[] = {
-       GPIO_KEY(KEY_POWER,     GPIO_PORT99,    "SW1"),
-       GPIO_KEY(KEY_BACK,      GPIO_PORT100,   "SW2"),
-       GPIO_KEY(KEY_MENU,      GPIO_PORT97,    "SW3"),
-       GPIO_KEY(KEY_HOME,      GPIO_PORT98,    "SW4"),
+       GPIO_KEY(KEY_POWER,     GPIO_PORT99,    "SW3", .wakeup = 1),
+       GPIO_KEY(KEY_BACK,      GPIO_PORT100,   "SW4"),
+       GPIO_KEY(KEY_MENU,      GPIO_PORT97,    "SW5"),
+       GPIO_KEY(KEY_HOME,      GPIO_PORT98,    "SW6"),
 };
 
 static struct gpio_keys_platform_data gpio_key_info = {
@@ -901,8 +902,8 @@ static struct platform_device *eva_devices[] __initdata = {
        &camera_device,
        &ceu0_device,
        &fsi_device,
-       &fsi_hdmi_device,
        &fsi_wm8978_device,
+       &fsi_hdmi_device,
 };
 
 static void __init eva_clock_init(void)
@@ -1181,10 +1182,10 @@ static void __init eva_init(void)
 
        eva_clock_init();
 
-       rmobile_add_device_to_domain(&r8a7740_pd_a4lc, &lcdc0_device);
-       rmobile_add_device_to_domain(&r8a7740_pd_a4lc, &hdmi_lcdc_device);
+       rmobile_add_device_to_domain("A4LC", &lcdc0_device);
+       rmobile_add_device_to_domain("A4LC", &hdmi_lcdc_device);
        if (usb)
-               rmobile_add_device_to_domain(&r8a7740_pd_a3sp, usb);
+               rmobile_add_device_to_domain("A3SP", usb);
 }
 
 static void __init eva_earlytimer_init(void)
index 7ea2b31e31991355cb9304dfcafcc4ab6db6f628..c76776a3e70da10aaf2027537f5630b020c3f505 100644 (file)
@@ -695,6 +695,7 @@ static struct platform_device usbhs0_device = {
  *  - J30 "open"
  *  - modify usbhs1_get_id() USBHS_HOST -> USBHS_GADGET
  *  - add .get_vbus = usbhs_get_vbus in usbhs1_private
+ *  - check usbhs0_device(pio)/usbhs1_device(irq) order in mackerel_devices.
  */
 #define IRQ8 evt2irq(0x0300)
 #define USB_PHY_MODE           (1 << 4)
@@ -1325,8 +1326,8 @@ static struct platform_device *mackerel_devices[] __initdata = {
        &nor_flash_device,
        &smc911x_device,
        &lcdc_device,
-       &usbhs1_device,
        &usbhs0_device,
+       &usbhs1_device,
        &leds_device,
        &fsi_device,
        &fsi_ak4643_device,
@@ -1409,6 +1410,22 @@ static struct i2c_board_info i2c1_devices[] = {
 #define USCCR1         0xE6058144
 static void __init mackerel_init(void)
 {
+       struct pm_domain_device domain_devices[] = {
+               { "A4LC", &lcdc_device, },
+               { "A4LC", &hdmi_lcdc_device, },
+               { "A4LC", &meram_device, },
+               { "A4MP", &fsi_device, },
+               { "A3SP", &usbhs0_device, },
+               { "A3SP", &usbhs1_device, },
+               { "A3SP", &nand_flash_device, },
+               { "A3SP", &sh_mmcif_device, },
+               { "A3SP", &sdhi0_device, },
+#if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE)
+               { "A3SP", &sdhi1_device, },
+#endif
+               { "A3SP", &sdhi2_device, },
+               { "A4R", &ceu_device, },
+       };
        u32 srcr4;
        struct clk *clk;
 
@@ -1623,20 +1640,8 @@ static void __init mackerel_init(void)
 
        platform_add_devices(mackerel_devices, ARRAY_SIZE(mackerel_devices));
 
-       rmobile_add_device_to_domain(&sh7372_pd_a4lc, &lcdc_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a4lc, &hdmi_lcdc_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a4lc, &meram_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a4mp, &fsi_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a3sp, &usbhs0_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a3sp, &usbhs1_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a3sp, &nand_flash_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a3sp, &sh_mmcif_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a3sp, &sdhi0_device);
-#if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE)
-       rmobile_add_device_to_domain(&sh7372_pd_a3sp, &sdhi1_device);
-#endif
-       rmobile_add_device_to_domain(&sh7372_pd_a3sp, &sdhi2_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a4r, &ceu_device);
+       rmobile_add_devices_to_domains(domain_devices,
+                                      ARRAY_SIZE(domain_devices));
 
        hdmi_init_pm_clock();
        sh7372_pm_init();
@@ -1650,6 +1655,6 @@ MACHINE_START(MACKEREL, "mackerel")
        .init_irq       = sh7372_init_irq,
        .handle_irq     = shmobile_handle_irq_intc,
        .init_machine   = mackerel_init,
-       .init_late      = shmobile_init_late,
+       .init_late      = sh7372_pm_init_late,
        .timer          = &shmobile_timer,
 MACHINE_END
index 3a528cf4366cb6addff63fb9e7032948f71f49e2..fcf5a47f47724ccd2be52005adcef685487671a8 100644 (file)
@@ -67,7 +67,7 @@ static struct smsc911x_platform_config smsc911x_platdata = {
 
 static struct platform_device eth_device = {
        .name           = "smsc911x",
-       .id             = 0,
+       .id             = -1,
        .dev  = {
                .platform_data = &smsc911x_platdata,
        },
diff --git a/arch/arm/mach-shmobile/common.c b/arch/arm/mach-shmobile/common.c
deleted file mode 100644 (file)
index 608aba9..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
- */
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <mach/common.h>
-
-void __init shmobile_init_late(void)
-{
-       shmobile_suspend_init();
-       shmobile_cpuidle_init();
-}
index 7b541e911ab4aebe99262ef74c8ec0fb6084c01c..9e050268cde4d237ab850a532b38c22cdb37c694 100644 (file)
 #include <asm/cpuidle.h>
 #include <asm/io.h>
 
-static void shmobile_enter_wfi(void)
+int shmobile_enter_wfi(struct cpuidle_device *dev, struct cpuidle_driver *drv,
+                      int index)
 {
        cpu_do_idle();
-}
-
-void (*shmobile_cpuidle_modes[CPUIDLE_STATE_MAX])(void) = {
-       shmobile_enter_wfi, /* regular sleep mode */
-};
-
-static int shmobile_cpuidle_enter(struct cpuidle_device *dev,
-                                 struct cpuidle_driver *drv,
-                                 int index)
-{
-       shmobile_cpuidle_modes[index]();
-
-       return index;
+       return 0;
 }
 
 static struct cpuidle_device shmobile_cpuidle_dev;
-static struct cpuidle_driver shmobile_cpuidle_driver = {
+static struct cpuidle_driver shmobile_cpuidle_default_driver = {
        .name                   = "shmobile_cpuidle",
        .owner                  = THIS_MODULE,
        .en_core_tk_irqen       = 1,
        .states[0]              = ARM_CPUIDLE_WFI_STATE,
+       .states[0].enter        = shmobile_enter_wfi,
        .safe_state_index       = 0, /* C1 */
        .state_count            = 1,
 };
 
-void (*shmobile_cpuidle_setup)(struct cpuidle_driver *drv);
+static struct cpuidle_driver *cpuidle_drv = &shmobile_cpuidle_default_driver;
+
+void shmobile_cpuidle_set_driver(struct cpuidle_driver *drv)
+{
+       cpuidle_drv = drv;
+}
 
 int shmobile_cpuidle_init(void)
 {
        struct cpuidle_device *dev = &shmobile_cpuidle_dev;
-       struct cpuidle_driver *drv = &shmobile_cpuidle_driver;
-       int i;
-
-       for (i = 0; i < CPUIDLE_STATE_MAX; i++)
-               drv->states[i].enter = shmobile_cpuidle_enter;
-
-       if (shmobile_cpuidle_setup)
-               shmobile_cpuidle_setup(drv);
 
-       cpuidle_register_driver(drv);
+       cpuidle_register_driver(cpuidle_drv);
 
-       dev->state_count = drv->state_count;
+       dev->state_count = cpuidle_drv->state_count;
        cpuidle_register_device(dev);
 
        return 0;
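
With the per-SoC setup callback gone, an SoC that wants more than the default WFI
state now supplies a complete cpuidle_driver of its own and hands it to
shmobile_cpuidle_set_driver() before shmobile_cpuidle_init() runs, as the sh7372
code later in this diff does. A minimal sketch of that usage with a hypothetical
single-state driver:

    #include <linux/cpuidle.h>
    #include <linux/init.h>
    #include <linux/module.h>
    #include <asm/cpuidle.h>
    #include <mach/common.h>

    static struct cpuidle_driver my_soc_cpuidle_driver = {
            .name                   = "my_soc_cpuidle",
            .owner                  = THIS_MODULE,
            .en_core_tk_irqen       = 1,
            .state_count            = 1,
            .safe_state_index       = 0,
            .states[0]              = ARM_CPUIDLE_WFI_STATE,
            .states[0].enter        = shmobile_enter_wfi,
    };

    void __init my_soc_pm_init(void)
    {
            /* must run before shmobile_cpuidle_init() registers the driver */
            shmobile_cpuidle_set_driver(&my_soc_cpuidle_driver);
    }
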
index 45e61dada030ba263fd721d4210d477e8664faba..eb89293fff4d36068a7fc3baf305bd7010209799 100644 (file)
@@ -14,8 +14,10 @@ extern int shmobile_clk_init(void);
 extern void shmobile_handle_irq_intc(struct pt_regs *);
 extern struct platform_suspend_ops shmobile_suspend_ops;
 struct cpuidle_driver;
-extern void (*shmobile_cpuidle_modes[])(void);
-extern void (*shmobile_cpuidle_setup)(struct cpuidle_driver *drv);
+struct cpuidle_device;
+extern int shmobile_enter_wfi(struct cpuidle_device *dev,
+                             struct cpuidle_driver *drv, int index);
+extern void shmobile_cpuidle_set_driver(struct cpuidle_driver *drv);
 
 extern void sh7367_init_irq(void);
 extern void sh7367_map_io(void);
@@ -86,8 +88,6 @@ extern int r8a7779_boot_secondary(unsigned int cpu);
 extern void r8a7779_smp_prepare_cpus(void);
 extern void r8a7779_register_twd(void);
 
-extern void shmobile_init_late(void);
-
 #ifdef CONFIG_SUSPEND
 int shmobile_suspend_init(void);
 #else
@@ -100,4 +100,10 @@ int shmobile_cpuidle_init(void);
 static inline int shmobile_cpuidle_init(void) { return 0; }
 #endif
 
+static inline void shmobile_init_late(void)
+{
+       shmobile_suspend_init();
+       shmobile_cpuidle_init();
+}
+
 #endif /* __ARCH_MACH_COMMON_H */
index 5a402840fe28afe2d630e8496d1edb6a69b0a9d8..690553a06887897a6c29ffb396ba60bafb29be8e 100644 (file)
@@ -12,6 +12,8 @@
 
 #include <linux/pm_domain.h>
 
+#define DEFAULT_DEV_LATENCY_NS 250000
+
 struct platform_device;
 
 struct rmobile_pm_domain {
@@ -29,16 +31,33 @@ struct rmobile_pm_domain *to_rmobile_pd(struct generic_pm_domain *d)
        return container_of(d, struct rmobile_pm_domain, genpd);
 }
 
+struct pm_domain_device {
+       const char *domain_name;
+       struct platform_device *pdev;
+};
+
 #ifdef CONFIG_PM
-extern void rmobile_init_pm_domain(struct rmobile_pm_domain *rmobile_pd);
-extern void rmobile_add_device_to_domain(struct rmobile_pm_domain *rmobile_pd,
-                                       struct platform_device *pdev);
-extern void rmobile_pm_add_subdomain(struct rmobile_pm_domain *rmobile_pd,
-                                   struct rmobile_pm_domain *rmobile_sd);
+extern void rmobile_init_domains(struct rmobile_pm_domain domains[], int num);
+extern void rmobile_add_device_to_domain_td(const char *domain_name,
+                                           struct platform_device *pdev,
+                                           struct gpd_timing_data *td);
+
+static inline void rmobile_add_device_to_domain(const char *domain_name,
+                                               struct platform_device *pdev)
+{
+       rmobile_add_device_to_domain_td(domain_name, pdev, NULL);
+}
+
+extern void rmobile_add_devices_to_domains(struct pm_domain_device data[],
+                                          int size);
 #else
-#define rmobile_init_pm_domain(pd) do { } while (0)
-#define rmobile_add_device_to_domain(pd, pdev) do { } while (0)
-#define rmobile_pm_add_subdomain(pd, sd) do { } while (0)
+
+#define rmobile_init_domains(domains, num) do { } while (0)
+#define rmobile_add_device_to_domain_td(name, pdev, td) do { } while (0)
+#define rmobile_add_device_to_domain(name, pdev) do { } while (0)
+
+static inline void rmobile_add_devices_to_domains(struct pm_domain_device d[],
+                                                 int size) {}
 #endif /* CONFIG_PM */
 
 #endif /* PM_RMOBILE_H */
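
The reworked header lets board and SoC code refer to power domains by name:
domains are registered in bulk with rmobile_init_domains(), and devices are
attached either one at a time or through a pm_domain_device table, which also
applies the default device latencies. A minimal usage sketch with hypothetical
platform devices:

    #include <linux/init.h>
    #include <linux/kernel.h>
    #include <linux/platform_device.h>
    #include <mach/pm-rmobile.h>

    static struct platform_device my_uart_device;   /* illustrative */
    static struct platform_device my_lcdc_device;   /* illustrative */

    static struct pm_domain_device my_domain_devices[] = {
            { "A3SP", &my_uart_device, },
            { "A4LC", &my_lcdc_device, },
    };

    static void __init my_board_init(void)
    {
            /* the SoC's *_init_pm_domains() must have registered the names */
            rmobile_add_devices_to_domains(my_domain_devices,
                                           ARRAY_SIZE(my_domain_devices));
    }
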
index 7143147780df55f6e17a689e9cc67ed2a9cd17e9..59d252f4cf975b3ecd64ffde955f8e6f387f7651 100644 (file)
@@ -607,9 +607,9 @@ enum {
 };
 
 #ifdef CONFIG_PM
-extern struct rmobile_pm_domain r8a7740_pd_a4s;
-extern struct rmobile_pm_domain r8a7740_pd_a3sp;
-extern struct rmobile_pm_domain r8a7740_pd_a4lc;
+extern void __init r8a7740_init_pm_domains(void);
+#else
+static inline void r8a7740_init_pm_domains(void) {}
 #endif /* CONFIG_PM */
 
 #endif /* __ASM_R8A7740_H__ */
index b07ad318eb2ec47577b2893bd9e8a7ba71c39d3d..7ad47977d2e7ba15e1b0674b0b96c292e050dd3d 100644 (file)
@@ -347,17 +347,9 @@ extern int r8a7779_sysc_power_down(struct r8a7779_pm_ch *r8a7779_ch);
 extern int r8a7779_sysc_power_up(struct r8a7779_pm_ch *r8a7779_ch);
 
 #ifdef CONFIG_PM
-extern struct r8a7779_pm_domain r8a7779_sh4a;
-extern struct r8a7779_pm_domain r8a7779_sgx;
-extern struct r8a7779_pm_domain r8a7779_vdp1;
-extern struct r8a7779_pm_domain r8a7779_impx3;
-
-extern void r8a7779_init_pm_domain(struct r8a7779_pm_domain *r8a7779_pd);
-extern void r8a7779_add_device_to_domain(struct r8a7779_pm_domain *r8a7779_pd,
-                                       struct platform_device *pdev);
+extern void __init r8a7779_init_pm_domains(void);
 #else
-#define r8a7779_init_pm_domain(pd) do { } while (0)
-#define r8a7779_add_device_to_domain(pd, pdev) do { } while (0)
+static inline void r8a7779_init_pm_domains(void) {}
 #endif /* CONFIG_PM */
 
 #endif /* __ASM_R8A7779_H__ */
index b59048e6d8fd7302717b9bb4f9359eae95ce10e5..eb98b45c508902c631153c093ed14aea877893d5 100644 (file)
@@ -478,21 +478,17 @@ extern struct clk sh7372_fsibck_clk;
 extern struct clk sh7372_fsidiva_clk;
 extern struct clk sh7372_fsidivb_clk;
 
-#ifdef CONFIG_PM
-extern struct rmobile_pm_domain sh7372_pd_a4lc;
-extern struct rmobile_pm_domain sh7372_pd_a4mp;
-extern struct rmobile_pm_domain sh7372_pd_d4;
-extern struct rmobile_pm_domain sh7372_pd_a4r;
-extern struct rmobile_pm_domain sh7372_pd_a3rv;
-extern struct rmobile_pm_domain sh7372_pd_a3ri;
-extern struct rmobile_pm_domain sh7372_pd_a4s;
-extern struct rmobile_pm_domain sh7372_pd_a3sp;
-extern struct rmobile_pm_domain sh7372_pd_a3sg;
-#endif /* CONFIG_PM */
-
 extern void sh7372_intcs_suspend(void);
 extern void sh7372_intcs_resume(void);
 extern void sh7372_intca_suspend(void);
 extern void sh7372_intca_resume(void);
 
+#ifdef CONFIG_PM
+extern void __init sh7372_init_pm_domains(void);
+#else
+static inline void sh7372_init_pm_domains(void) {}
+#endif
+
+extern void __init sh7372_pm_init_late(void);
+
 #endif /* __ASM_SH7372_H__ */
index ee447404c857ed5794eaf5cbe1153a9e2e2d5505..588555a67d9c438ffb4d0ea0908923056554f635 100644 (file)
@@ -259,9 +259,9 @@ static int sh73a0_set_wake(struct irq_data *data, unsigned int on)
        return 0; /* always allow wakeup */
 }
 
-#define RELOC_BASE 0x1000
+#define RELOC_BASE 0x1200
 
-/* INTCA IRQ pins at INTCS + 0x1000 to make space for GIC+INTC handling */
+/* INTCA IRQ pins at INTCS + RELOC_BASE to make space for GIC+INTC handling */
 #define INTCS_VECT_RELOC(n, vect) INTCS_VECT((n), (vect) + RELOC_BASE)
 
 INTC_IRQ_PINS_32(intca_irq_pins, 0xe6900000,
index 893504d012a6bb98b583b1660b156325496eabf0..21e5316d2d881aea42b507334b9fcedd139c760d 100644 (file)
@@ -21,14 +21,6 @@ static int r8a7740_pd_a4s_suspend(void)
        return -EBUSY;
 }
 
-struct rmobile_pm_domain r8a7740_pd_a4s = {
-       .genpd.name     = "A4S",
-       .bit_shift      = 10,
-       .gov            = &pm_domain_always_on_gov,
-       .no_debug       = true,
-       .suspend        = r8a7740_pd_a4s_suspend,
-};
-
 static int r8a7740_pd_a3sp_suspend(void)
 {
        /*
@@ -38,17 +30,31 @@ static int r8a7740_pd_a3sp_suspend(void)
        return console_suspend_enabled ? 0 : -EBUSY;
 }
 
-struct rmobile_pm_domain r8a7740_pd_a3sp = {
-       .genpd.name     = "A3SP",
-       .bit_shift      = 11,
-       .gov            = &pm_domain_always_on_gov,
-       .no_debug       = true,
-       .suspend        = r8a7740_pd_a3sp_suspend,
+static struct rmobile_pm_domain r8a7740_pm_domains[] = {
+       {
+               .genpd.name     = "A4S",
+               .bit_shift      = 10,
+               .gov            = &pm_domain_always_on_gov,
+               .no_debug       = true,
+               .suspend        = r8a7740_pd_a4s_suspend,
+       },
+       {
+               .genpd.name     = "A3SP",
+               .bit_shift      = 11,
+               .gov            = &pm_domain_always_on_gov,
+               .no_debug       = true,
+               .suspend        = r8a7740_pd_a3sp_suspend,
+       },
+       {
+               .genpd.name     = "A4LC",
+               .bit_shift      = 1,
+       },
 };
 
-struct rmobile_pm_domain r8a7740_pd_a4lc = {
-       .genpd.name     = "A4LC",
-       .bit_shift      = 1,
-};
+void __init r8a7740_init_pm_domains(void)
+{
+       rmobile_init_domains(r8a7740_pm_domains, ARRAY_SIZE(r8a7740_pm_domains));
+       pm_genpd_add_subdomain_names("A4S", "A3SP");
+}
 
 #endif /* CONFIG_PM */
index a18a4ae16d2bf7ff9072cda4bb70eae8392cf9f1..d50a8e9b94a4f9ac7030bdc49dc5450674e5af9a 100644 (file)
@@ -183,7 +183,7 @@ static bool pd_active_wakeup(struct device *dev)
        return true;
 }
 
-void r8a7779_init_pm_domain(struct r8a7779_pm_domain *r8a7779_pd)
+static void r8a7779_init_pm_domain(struct r8a7779_pm_domain *r8a7779_pd)
 {
        struct generic_pm_domain *genpd = &r8a7779_pd->genpd;
 
@@ -199,43 +199,44 @@ void r8a7779_init_pm_domain(struct r8a7779_pm_domain *r8a7779_pd)
                pd_power_up(&r8a7779_pd->genpd);
 }
 
-void r8a7779_add_device_to_domain(struct r8a7779_pm_domain *r8a7779_pd,
-                                struct platform_device *pdev)
-{
-       struct device *dev = &pdev->dev;
-
-       pm_genpd_add_device(&r8a7779_pd->genpd, dev);
-       if (pm_clk_no_clocks(dev))
-               pm_clk_add(dev, NULL);
-}
-
-struct r8a7779_pm_domain r8a7779_sh4a = {
-       .ch = {
-               .chan_offs = 0x80, /* PWRSR1 .. PWRER1 */
-               .isr_bit = 16, /* SH4A */
-       }
-};
-
-struct r8a7779_pm_domain r8a7779_sgx = {
-       .ch = {
-               .chan_offs = 0xc0, /* PWRSR2 .. PWRER2 */
-               .isr_bit = 20, /* SGX */
-       }
+static struct r8a7779_pm_domain r8a7779_pm_domains[] = {
+       {
+               .genpd.name = "SH4A",
+               .ch = {
+                       .chan_offs = 0x80, /* PWRSR1 .. PWRER1 */
+                       .isr_bit = 16, /* SH4A */
+               },
+       },
+       {
+               .genpd.name = "SGX",
+               .ch = {
+                       .chan_offs = 0xc0, /* PWRSR2 .. PWRER2 */
+                       .isr_bit = 20, /* SGX */
+               },
+       },
+       {
+               .genpd.name = "VDP1",
+               .ch = {
+                       .chan_offs = 0x100, /* PWRSR3 .. PWRER3 */
+                       .isr_bit = 21, /* VDP */
+               },
+       },
+       {
+               .genpd.name = "IMPX3",
+               .ch = {
+                       .chan_offs = 0x140, /* PWRSR4 .. PWRER4 */
+                       .isr_bit = 24, /* IMP */
+               },
+       },
 };
 
-struct r8a7779_pm_domain r8a7779_vdp1 = {
-       .ch = {
-               .chan_offs = 0x100, /* PWRSR3 .. PWRER3 */
-               .isr_bit = 21, /* VDP */
-       }
-};
+void __init r8a7779_init_pm_domains(void)
+{
+       int j;
 
-struct r8a7779_pm_domain r8a7779_impx3 = {
-       .ch = {
-               .chan_offs = 0x140, /* PWRSR4 .. PWRER4 */
-               .isr_bit = 24, /* IMP */
-       }
-};
+       for (j = 0; j < ARRAY_SIZE(r8a7779_pm_domains); j++)
+               r8a7779_init_pm_domain(&r8a7779_pm_domains[j]);
+}
 
 #endif /* CONFIG_PM */
 
index a8562540f1d64f44cd87d12960e2f82d4440a69d..d37d368434da84399c6159295c397dc10f652158 100644 (file)
@@ -134,7 +134,7 @@ static int rmobile_pd_start_dev(struct device *dev)
        return ret;
 }
 
-void rmobile_init_pm_domain(struct rmobile_pm_domain *rmobile_pd)
+static void rmobile_init_pm_domain(struct rmobile_pm_domain *rmobile_pd)
 {
        struct generic_pm_domain *genpd = &rmobile_pd->genpd;
        struct dev_power_governor *gov = rmobile_pd->gov;
@@ -149,19 +149,38 @@ void rmobile_init_pm_domain(struct rmobile_pm_domain *rmobile_pd)
        __rmobile_pd_power_up(rmobile_pd, false);
 }
 
-void rmobile_add_device_to_domain(struct rmobile_pm_domain *rmobile_pd,
-                                struct platform_device *pdev)
+void rmobile_init_domains(struct rmobile_pm_domain domains[], int num)
+{
+       int j;
+
+       for (j = 0; j < num; j++)
+               rmobile_init_pm_domain(&domains[j]);
+}
+
+void rmobile_add_device_to_domain_td(const char *domain_name,
+                                    struct platform_device *pdev,
+                                    struct gpd_timing_data *td)
 {
        struct device *dev = &pdev->dev;
 
-       pm_genpd_add_device(&rmobile_pd->genpd, dev);
+       __pm_genpd_name_add_device(domain_name, dev, td);
        if (pm_clk_no_clocks(dev))
                pm_clk_add(dev, NULL);
 }
 
-void rmobile_pm_add_subdomain(struct rmobile_pm_domain *rmobile_pd,
-                            struct rmobile_pm_domain *rmobile_sd)
+void rmobile_add_devices_to_domains(struct pm_domain_device data[],
+                                   int size)
 {
-       pm_genpd_add_subdomain(&rmobile_pd->genpd, &rmobile_sd->genpd);
+       struct gpd_timing_data latencies = {
+               .stop_latency_ns = DEFAULT_DEV_LATENCY_NS,
+               .start_latency_ns = DEFAULT_DEV_LATENCY_NS,
+               .save_state_latency_ns = DEFAULT_DEV_LATENCY_NS,
+               .restore_state_latency_ns = DEFAULT_DEV_LATENCY_NS,
+       };
+       int j;
+
+       for (j = 0; j < size; j++)
+               rmobile_add_device_to_domain_td(data[j].domain_name,
+                                               data[j].pdev, &latencies);
 }
 #endif /* CONFIG_PM */
index 79203706922651dad75595869802fa3520c78a8c..a7a5e20ae9a0df62e43493efff406bc8878895b9 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/irq.h>
 #include <linux/bitrev.h>
 #include <linux/console.h>
+#include <asm/cpuidle.h>
 #include <asm/io.h>
 #include <asm/tlbflush.h>
 #include <asm/suspend.h>
 
 #ifdef CONFIG_PM
 
-struct rmobile_pm_domain sh7372_pd_a4lc = {
-       .genpd.name = "A4LC",
-       .bit_shift = 1,
-};
-
-struct rmobile_pm_domain sh7372_pd_a4mp = {
-       .genpd.name = "A4MP",
-       .bit_shift = 2,
-};
-
-struct rmobile_pm_domain sh7372_pd_d4 = {
-       .genpd.name = "D4",
-       .bit_shift = 3,
-};
+#define PM_DOMAIN_ON_OFF_LATENCY_NS    250000
 
 static int sh7372_a4r_pd_suspend(void)
 {
@@ -93,39 +81,25 @@ static int sh7372_a4r_pd_suspend(void)
        return 0;
 }
 
-struct rmobile_pm_domain sh7372_pd_a4r = {
-       .genpd.name = "A4R",
-       .bit_shift = 5,
-       .suspend = sh7372_a4r_pd_suspend,
-       .resume = sh7372_intcs_resume,
-};
+static bool a4s_suspend_ready;
 
-struct rmobile_pm_domain sh7372_pd_a3rv = {
-       .genpd.name = "A3RV",
-       .bit_shift = 6,
-};
-
-struct rmobile_pm_domain sh7372_pd_a3ri = {
-       .genpd.name = "A3RI",
-       .bit_shift = 8,
-};
-
-static int sh7372_pd_a4s_suspend(void)
+static int sh7372_a4s_pd_suspend(void)
 {
        /*
         * The A4S domain contains the CPU core and therefore it should
-        * only be turned off if the CPU is in use.
+        * only be turned off if the CPU is not in use.  This may happen
+        * during system suspend, when SYSC is going to be used for generating
+        * resume signals and a4s_suspend_ready is set to let
+        * sh7372_enter_suspend() know that it can turn A4S off.
         */
+       a4s_suspend_ready = true;
        return -EBUSY;
 }
 
-struct rmobile_pm_domain sh7372_pd_a4s = {
-       .genpd.name = "A4S",
-       .bit_shift = 10,
-       .gov = &pm_domain_always_on_gov,
-       .no_debug = true,
-       .suspend = sh7372_pd_a4s_suspend,
-};
+static void sh7372_a4s_pd_resume(void)
+{
+       a4s_suspend_ready = false;
+}
 
 static int sh7372_a3sp_pd_suspend(void)
 {
@@ -136,18 +110,80 @@ static int sh7372_a3sp_pd_suspend(void)
        return console_suspend_enabled ? 0 : -EBUSY;
 }
 
-struct rmobile_pm_domain sh7372_pd_a3sp = {
-       .genpd.name = "A3SP",
-       .bit_shift = 11,
-       .gov = &pm_domain_always_on_gov,
-       .no_debug = true,
-       .suspend = sh7372_a3sp_pd_suspend,
+static struct rmobile_pm_domain sh7372_pm_domains[] = {
+       {
+               .genpd.name = "A4LC",
+               .genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+               .genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+               .bit_shift = 1,
+       },
+       {
+               .genpd.name = "A4MP",
+               .genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+               .genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+               .bit_shift = 2,
+       },
+       {
+               .genpd.name = "D4",
+               .genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+               .genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+               .bit_shift = 3,
+       },
+       {
+               .genpd.name = "A4R",
+               .genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+               .genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+               .bit_shift = 5,
+               .suspend = sh7372_a4r_pd_suspend,
+               .resume = sh7372_intcs_resume,
+       },
+       {
+               .genpd.name = "A3RV",
+               .genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+               .genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+               .bit_shift = 6,
+       },
+       {
+               .genpd.name = "A3RI",
+               .genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+               .genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+               .bit_shift = 8,
+       },
+       {
+               .genpd.name = "A4S",
+               .genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+               .genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+               .bit_shift = 10,
+               .gov = &pm_domain_always_on_gov,
+               .no_debug = true,
+               .suspend = sh7372_a4s_pd_suspend,
+               .resume = sh7372_a4s_pd_resume,
+       },
+       {
+               .genpd.name = "A3SP",
+               .genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+               .genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+               .bit_shift = 11,
+               .gov = &pm_domain_always_on_gov,
+               .no_debug = true,
+               .suspend = sh7372_a3sp_pd_suspend,
+       },
+       {
+               .genpd.name = "A3SG",
+               .genpd.power_on_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+               .genpd.power_off_latency_ns = PM_DOMAIN_ON_OFF_LATENCY_NS,
+               .bit_shift = 13,
+       },
 };
 
-struct rmobile_pm_domain sh7372_pd_a3sg = {
-       .genpd.name = "A3SG",
-       .bit_shift = 13,
-};
+void __init sh7372_init_pm_domains(void)
+{
+       rmobile_init_domains(sh7372_pm_domains, ARRAY_SIZE(sh7372_pm_domains));
+       pm_genpd_add_subdomain_names("A4LC", "A3RV");
+       pm_genpd_add_subdomain_names("A4R", "A4LC");
+       pm_genpd_add_subdomain_names("A4S", "A3SG");
+       pm_genpd_add_subdomain_names("A4S", "A3SP");
+}
 
 #endif /* CONFIG_PM */
 
@@ -303,6 +339,21 @@ static void sh7372_enter_a3sm_common(int pllc0_on)
        sh7372_set_reset_vector(__pa(sh7372_resume_core_standby_sysc));
        sh7372_enter_sysc(pllc0_on, 1 << 12);
 }
+
+static void sh7372_enter_a4s_common(int pllc0_on)
+{
+       sh7372_intca_suspend();
+       sh7372_set_reset_vector(SMFRAM);
+       sh7372_enter_sysc(pllc0_on, 1 << 10);
+       sh7372_intca_resume();
+}
+
+static void sh7372_pm_setup_smfram(void)
+{
+       memcpy((void *)SMFRAM, sh7372_resume_core_standby_sysc, 0x100);
+}
+#else
+static inline void sh7372_pm_setup_smfram(void) {}
 #endif /* CONFIG_SUSPEND || CONFIG_CPU_IDLE */
 
 #ifdef CONFIG_CPU_IDLE
@@ -312,7 +363,8 @@ static int sh7372_do_idle_core_standby(unsigned long unused)
        return 0;
 }
 
-static void sh7372_enter_core_standby(void)
+static int sh7372_enter_core_standby(struct cpuidle_device *dev,
+                                    struct cpuidle_driver *drv, int index)
 {
        sh7372_set_reset_vector(__pa(sh7372_resume_core_standby_sysc));
 
@@ -323,83 +375,102 @@ static void sh7372_enter_core_standby(void)
 
         /* disable reset vector translation */
        __raw_writel(0, SBAR);
+
+       return 1;
 }
 
-static void sh7372_enter_a3sm_pll_on(void)
+static int sh7372_enter_a3sm_pll_on(struct cpuidle_device *dev,
+                                   struct cpuidle_driver *drv, int index)
 {
        sh7372_enter_a3sm_common(1);
+       return 2;
 }
 
-static void sh7372_enter_a3sm_pll_off(void)
+static int sh7372_enter_a3sm_pll_off(struct cpuidle_device *dev,
+                                    struct cpuidle_driver *drv, int index)
 {
        sh7372_enter_a3sm_common(0);
+       return 3;
 }
 
-static void sh7372_cpuidle_setup(struct cpuidle_driver *drv)
+static int sh7372_enter_a4s(struct cpuidle_device *dev,
+                           struct cpuidle_driver *drv, int index)
 {
-       struct cpuidle_state *state = &drv->states[drv->state_count];
-
-       snprintf(state->name, CPUIDLE_NAME_LEN, "C2");
-       strncpy(state->desc, "Core Standby Mode", CPUIDLE_DESC_LEN);
-       state->exit_latency = 10;
-       state->target_residency = 20 + 10;
-       state->flags = CPUIDLE_FLAG_TIME_VALID;
-       shmobile_cpuidle_modes[drv->state_count] = sh7372_enter_core_standby;
-       drv->state_count++;
-
-       state = &drv->states[drv->state_count];
-       snprintf(state->name, CPUIDLE_NAME_LEN, "C3");
-       strncpy(state->desc, "A3SM PLL ON", CPUIDLE_DESC_LEN);
-       state->exit_latency = 20;
-       state->target_residency = 30 + 20;
-       state->flags = CPUIDLE_FLAG_TIME_VALID;
-       shmobile_cpuidle_modes[drv->state_count] = sh7372_enter_a3sm_pll_on;
-       drv->state_count++;
-
-       state = &drv->states[drv->state_count];
-       snprintf(state->name, CPUIDLE_NAME_LEN, "C4");
-       strncpy(state->desc, "A3SM PLL OFF", CPUIDLE_DESC_LEN);
-       state->exit_latency = 120;
-       state->target_residency = 30 + 120;
-       state->flags = CPUIDLE_FLAG_TIME_VALID;
-       shmobile_cpuidle_modes[drv->state_count] = sh7372_enter_a3sm_pll_off;
-       drv->state_count++;
+       unsigned long msk, msk2;
+
+       if (!sh7372_sysc_valid(&msk, &msk2))
+               return sh7372_enter_a3sm_pll_off(dev, drv, index);
+
+       sh7372_setup_sysc(msk, msk2);
+       sh7372_enter_a4s_common(0);
+       return 4;
 }
 
+static struct cpuidle_driver sh7372_cpuidle_driver = {
+       .name                   = "sh7372_cpuidle",
+       .owner                  = THIS_MODULE,
+       .en_core_tk_irqen       = 1,
+       .state_count            = 5,
+       .safe_state_index       = 0, /* C1 */
+       .states[0] = ARM_CPUIDLE_WFI_STATE,
+       .states[0].enter = shmobile_enter_wfi,
+       .states[1] = {
+               .name = "C2",
+               .desc = "Core Standby Mode",
+               .exit_latency = 10,
+               .target_residency = 20 + 10,
+               .flags = CPUIDLE_FLAG_TIME_VALID,
+               .enter = sh7372_enter_core_standby,
+       },
+       .states[2] = {
+               .name = "C3",
+               .desc = "A3SM PLL ON",
+               .exit_latency = 20,
+               .target_residency = 30 + 20,
+               .flags = CPUIDLE_FLAG_TIME_VALID,
+               .enter = sh7372_enter_a3sm_pll_on,
+       },
+       .states[3] = {
+               .name = "C4",
+               .desc = "A3SM PLL OFF",
+               .exit_latency = 120,
+               .target_residency = 30 + 120,
+               .flags = CPUIDLE_FLAG_TIME_VALID,
+               .enter = sh7372_enter_a3sm_pll_off,
+       },
+       .states[4] = {
+               .name = "C5",
+               .desc = "A4S PLL OFF",
+               .exit_latency = 240,
+               .target_residency = 30 + 240,
+               .flags = CPUIDLE_FLAG_TIME_VALID,
+               .enter = sh7372_enter_a4s,
+               .disabled = true,
+       },
+};
+
 static void sh7372_cpuidle_init(void)
 {
-       shmobile_cpuidle_setup = sh7372_cpuidle_setup;
+       shmobile_cpuidle_set_driver(&sh7372_cpuidle_driver);
 }
 #else
 static void sh7372_cpuidle_init(void) {}
 #endif
 
 #ifdef CONFIG_SUSPEND
-static void sh7372_enter_a4s_common(int pllc0_on)
-{
-       sh7372_intca_suspend();
-       memcpy((void *)SMFRAM, sh7372_resume_core_standby_sysc, 0x100);
-       sh7372_set_reset_vector(SMFRAM);
-       sh7372_enter_sysc(pllc0_on, 1 << 10);
-       sh7372_intca_resume();
-}
-
 static int sh7372_enter_suspend(suspend_state_t suspend_state)
 {
        unsigned long msk, msk2;
 
        /* check active clocks to determine potential wakeup sources */
-       if (sh7372_sysc_valid(&msk, &msk2)) {
-               if (!console_suspend_enabled &&
-                   sh7372_pd_a4s.genpd.status == GPD_STATE_POWER_OFF) {
-                       /* convert INTC mask/sense to SYSC mask/sense */
-                       sh7372_setup_sysc(msk, msk2);
-
-                       /* enter A4S sleep with PLLC0 off */
-                       pr_debug("entering A4S\n");
-                       sh7372_enter_a4s_common(0);
-                       return 0;
-               }
+       if (sh7372_sysc_valid(&msk, &msk2) && a4s_suspend_ready) {
+               /* convert INTC mask/sense to SYSC mask/sense */
+               sh7372_setup_sysc(msk, msk2);
+
+               /* enter A4S sleep with PLLC0 off */
+               pr_debug("entering A4S\n");
+               sh7372_enter_a4s_common(0);
+               return 0;
        }
 
        /* default to enter A3SM sleep with PLLC0 off */
@@ -425,7 +496,7 @@ static int sh7372_pm_notifier_fn(struct notifier_block *notifier,
                 * executed during system suspend and resume, respectively, so
                 * that those functions don't crash while accessing the INTCS.
                 */
-               pm_genpd_poweron(&sh7372_pd_a4r.genpd);
+               pm_genpd_name_poweron("A4R");
                break;
        case PM_POST_SUSPEND:
                pm_genpd_poweroff_unused();
@@ -454,6 +525,14 @@ void __init sh7372_pm_init(void)
        /* do not convert A3SM, A3SP, A3SG, A4R power down into A4S */
        __raw_writel(0, PDNSEL);
 
+       sh7372_pm_setup_smfram();
+
        sh7372_suspend_init();
        sh7372_cpuidle_init();
 }
+
+void __init sh7372_pm_init_late(void)
+{
+       shmobile_init_late();
+       pm_genpd_name_attach_cpuidle("A4S", 4);
+}
index 78948a9dba0ec47547f5fca10b7aa71452600d7b..11bb1d9841975ef9be12c1591fae32c80903b55e 100644 (file)
@@ -673,12 +673,7 @@ void __init r8a7740_add_standard_devices(void)
        r8a7740_i2c_workaround(&i2c0_device);
        r8a7740_i2c_workaround(&i2c1_device);
 
-       /* PM domain */
-       rmobile_init_pm_domain(&r8a7740_pd_a4s);
-       rmobile_init_pm_domain(&r8a7740_pd_a3sp);
-       rmobile_init_pm_domain(&r8a7740_pd_a4lc);
-
-       rmobile_pm_add_subdomain(&r8a7740_pd_a4s, &r8a7740_pd_a3sp);
+       r8a7740_init_pm_domains();
 
        /* add devices */
        platform_add_devices(r8a7740_early_devices,
@@ -688,16 +683,16 @@ void __init r8a7740_add_standard_devices(void)
 
        /* add devices to PM domain  */
 
-       rmobile_add_device_to_domain(&r8a7740_pd_a3sp,  &scif0_device);
-       rmobile_add_device_to_domain(&r8a7740_pd_a3sp,  &scif1_device);
-       rmobile_add_device_to_domain(&r8a7740_pd_a3sp,  &scif2_device);
-       rmobile_add_device_to_domain(&r8a7740_pd_a3sp,  &scif3_device);
-       rmobile_add_device_to_domain(&r8a7740_pd_a3sp,  &scif4_device);
-       rmobile_add_device_to_domain(&r8a7740_pd_a3sp,  &scif5_device);
-       rmobile_add_device_to_domain(&r8a7740_pd_a3sp,  &scif6_device);
-       rmobile_add_device_to_domain(&r8a7740_pd_a3sp,  &scif7_device);
-       rmobile_add_device_to_domain(&r8a7740_pd_a3sp,  &scifb_device);
-       rmobile_add_device_to_domain(&r8a7740_pd_a3sp,  &i2c1_device);
+       rmobile_add_device_to_domain("A3SP",    &scif0_device);
+       rmobile_add_device_to_domain("A3SP",    &scif1_device);
+       rmobile_add_device_to_domain("A3SP",    &scif2_device);
+       rmobile_add_device_to_domain("A3SP",    &scif3_device);
+       rmobile_add_device_to_domain("A3SP",    &scif4_device);
+       rmobile_add_device_to_domain("A3SP",    &scif5_device);
+       rmobile_add_device_to_domain("A3SP",    &scif6_device);
+       rmobile_add_device_to_domain("A3SP",    &scif7_device);
+       rmobile_add_device_to_domain("A3SP",    &scifb_device);
+       rmobile_add_device_to_domain("A3SP",    &i2c1_device);
 }
 
 static void __init r8a7740_earlytimer_init(void)
index e98e46f6cf5508f8b1750c7abb2b2effd45a80db..2917668f0091c1042a70e512f50c12bacdb3ebb2 100644 (file)
@@ -251,10 +251,7 @@ void __init r8a7779_add_standard_devices(void)
 #endif
        r8a7779_pm_init();
 
-       r8a7779_init_pm_domain(&r8a7779_sh4a);
-       r8a7779_init_pm_domain(&r8a7779_sgx);
-       r8a7779_init_pm_domain(&r8a7779_vdp1);
-       r8a7779_init_pm_domain(&r8a7779_impx3);
+       r8a7779_init_pm_domains();
 
        platform_add_devices(r8a7779_early_devices,
                            ARRAY_SIZE(r8a7779_early_devices));
index 838a87be1d5c31cc46a932d58f27a73f00066ca9..a07954fbcd22b9017b588ef1f745bddf101bf419 100644 (file)
@@ -1001,21 +1001,34 @@ static struct platform_device *sh7372_late_devices[] __initdata = {
 
 void __init sh7372_add_standard_devices(void)
 {
-       rmobile_init_pm_domain(&sh7372_pd_a4lc);
-       rmobile_init_pm_domain(&sh7372_pd_a4mp);
-       rmobile_init_pm_domain(&sh7372_pd_d4);
-       rmobile_init_pm_domain(&sh7372_pd_a4r);
-       rmobile_init_pm_domain(&sh7372_pd_a3rv);
-       rmobile_init_pm_domain(&sh7372_pd_a3ri);
-       rmobile_init_pm_domain(&sh7372_pd_a4s);
-       rmobile_init_pm_domain(&sh7372_pd_a3sp);
-       rmobile_init_pm_domain(&sh7372_pd_a3sg);
-
-       rmobile_pm_add_subdomain(&sh7372_pd_a4lc, &sh7372_pd_a3rv);
-       rmobile_pm_add_subdomain(&sh7372_pd_a4r, &sh7372_pd_a4lc);
-
-       rmobile_pm_add_subdomain(&sh7372_pd_a4s, &sh7372_pd_a3sg);
-       rmobile_pm_add_subdomain(&sh7372_pd_a4s, &sh7372_pd_a3sp);
+       struct pm_domain_device domain_devices[] = {
+               { "A3RV", &vpu_device, },
+               { "A4MP", &spu0_device, },
+               { "A4MP", &spu1_device, },
+               { "A3SP", &scif0_device, },
+               { "A3SP", &scif1_device, },
+               { "A3SP", &scif2_device, },
+               { "A3SP", &scif3_device, },
+               { "A3SP", &scif4_device, },
+               { "A3SP", &scif5_device, },
+               { "A3SP", &scif6_device, },
+               { "A3SP", &iic1_device, },
+               { "A3SP", &dma0_device, },
+               { "A3SP", &dma1_device, },
+               { "A3SP", &dma2_device, },
+               { "A3SP", &usb_dma0_device, },
+               { "A3SP", &usb_dma1_device, },
+               { "A4R", &iic0_device, },
+               { "A4R", &veu0_device, },
+               { "A4R", &veu1_device, },
+               { "A4R", &veu2_device, },
+               { "A4R", &veu3_device, },
+               { "A4R", &jpu_device, },
+               { "A4R", &tmu00_device, },
+               { "A4R", &tmu01_device, },
+       };
+
+       sh7372_init_pm_domains();
 
        platform_add_devices(sh7372_early_devices,
                            ARRAY_SIZE(sh7372_early_devices));
@@ -1023,30 +1036,8 @@ void __init sh7372_add_standard_devices(void)
        platform_add_devices(sh7372_late_devices,
                            ARRAY_SIZE(sh7372_late_devices));
 
-       rmobile_add_device_to_domain(&sh7372_pd_a3rv, &vpu_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a4mp, &spu0_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a4mp, &spu1_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a3sp, &scif0_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a3sp, &scif1_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a3sp, &scif2_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a3sp, &scif3_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a3sp, &scif4_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a3sp, &scif5_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a3sp, &scif6_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a3sp, &iic1_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a3sp, &dma0_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a3sp, &dma1_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a3sp, &dma2_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a3sp, &usb_dma0_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a3sp, &usb_dma1_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a4r, &iic0_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a4r, &veu0_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a4r, &veu1_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a4r, &veu2_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a4r, &veu3_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a4r, &jpu_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a4r, &tmu00_device);
-       rmobile_add_device_to_domain(&sh7372_pd_a4r, &tmu01_device);
+       rmobile_add_devices_to_domains(domain_devices,
+                                      ARRAY_SIZE(domain_devices));
 }
 
 static void __init sh7372_earlytimer_init(void)
index 119bc52ab93ed7675d4528a779e97270d308c70e..4e07eec1270dd3b3fa51860fbc735a5d1fb9e18b 100644 (file)
@@ -63,10 +63,11 @@ static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd,
        pid = task_pid_nr(thread->task) << ASID_BITS;
        asm volatile(
        "       mrc     p15, 0, %0, c13, c0, 1\n"
-       "       bfi     %1, %0, #0, %2\n"
-       "       mcr     p15, 0, %1, c13, c0, 1\n"
+       "       and     %0, %0, %2\n"
+       "       orr     %0, %0, %1\n"
+       "       mcr     p15, 0, %0, c13, c0, 1\n"
        : "=r" (contextidr), "+r" (pid)
-       : "I" (ASID_BITS));
+       : "I" (~ASID_MASK));
        isb();
 
        return NOTIFY_OK;
index 4e7d1182e8a3a59270073b5cb3b348e0bd690ef8..e59c4ab71bcb78282f968cebbda09c43b49809ff 100644 (file)
@@ -267,17 +267,19 @@ static void __dma_free_remap(void *cpu_addr, size_t size)
        vunmap(cpu_addr);
 }
 
+#define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K
+
 struct dma_pool {
        size_t size;
        spinlock_t lock;
        unsigned long *bitmap;
        unsigned long nr_pages;
        void *vaddr;
-       struct page *page;
+       struct page **pages;
 };
 
 static struct dma_pool atomic_pool = {
-       .size = SZ_256K,
+       .size = DEFAULT_DMA_COHERENT_POOL_SIZE,
 };
 
 static int __init early_coherent_pool(char *p)
@@ -287,6 +289,21 @@ static int __init early_coherent_pool(char *p)
 }
 early_param("coherent_pool", early_coherent_pool);
 
+void __init init_dma_coherent_pool_size(unsigned long size)
+{
+       /*
+        * Catch any attempt to set the pool size too late.
+        */
+       BUG_ON(atomic_pool.vaddr);
+
+       /*
+        * Set the architecture-specific coherent pool size only if
+        * it has not been changed by a kernel command line parameter.
+        */
+       if (atomic_pool.size == DEFAULT_DMA_COHERENT_POOL_SIZE)
+               atomic_pool.size = size;
+}
+
 /*
  * Initialise the coherent pool for atomic allocations.
  */
@@ -297,6 +314,7 @@ static int __init atomic_pool_init(void)
        unsigned long nr_pages = pool->size >> PAGE_SHIFT;
        unsigned long *bitmap;
        struct page *page;
+       struct page **pages;
        void *ptr;
        int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long);
 
@@ -304,21 +322,31 @@ static int __init atomic_pool_init(void)
        if (!bitmap)
                goto no_bitmap;
 
+       pages = kzalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
+       if (!pages)
+               goto no_pages;
+
        if (IS_ENABLED(CONFIG_CMA))
                ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page);
        else
                ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot,
                                           &page, NULL);
        if (ptr) {
+               int i;
+
+               for (i = 0; i < nr_pages; i++)
+                       pages[i] = page + i;
+
                spin_lock_init(&pool->lock);
                pool->vaddr = ptr;
-               pool->page = page;
+               pool->pages = pages;
                pool->bitmap = bitmap;
                pool->nr_pages = nr_pages;
                pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n",
                       (unsigned)pool->size / 1024);
                return 0;
        }
+no_pages:
        kfree(bitmap);
 no_bitmap:
        pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
@@ -443,27 +471,45 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page)
        if (pageno < pool->nr_pages) {
                bitmap_set(pool->bitmap, pageno, count);
                ptr = pool->vaddr + PAGE_SIZE * pageno;
-               *ret_page = pool->page + pageno;
+               *ret_page = pool->pages[pageno];
+       } else {
+               pr_err_once("ERROR: %u KiB atomic DMA coherent pool is too small!\n"
+                           "Please increase it with coherent_pool= kernel parameter!\n",
+                           (unsigned)pool->size / 1024);
        }
        spin_unlock_irqrestore(&pool->lock, flags);
 
        return ptr;
 }
 
+static bool __in_atomic_pool(void *start, size_t size)
+{
+       struct dma_pool *pool = &atomic_pool;
+       void *end = start + size;
+       void *pool_start = pool->vaddr;
+       void *pool_end = pool->vaddr + pool->size;
+
+       if (start < pool_start || start >= pool_end)
+               return false;
+
+       if (end <= pool_end)
+               return true;
+
+       WARN(1, "Wrong coherent size(%p-%p) from atomic pool(%p-%p)\n",
+            start, end - 1, pool_start, pool_end - 1);
+
+       return false;
+}
+
 static int __free_from_pool(void *start, size_t size)
 {
        struct dma_pool *pool = &atomic_pool;
        unsigned long pageno, count;
        unsigned long flags;
 
-       if (start < pool->vaddr || start > pool->vaddr + pool->size)
+       if (!__in_atomic_pool(start, size))
                return 0;
 
-       if (start + size > pool->vaddr + pool->size) {
-               WARN(1, "freeing wrong coherent size from pool\n");
-               return 0;
-       }
-
        pageno = (start - pool->vaddr) >> PAGE_SHIFT;
        count = size >> PAGE_SHIFT;
 
@@ -1090,10 +1136,22 @@ static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t si
        return 0;
 }
 
+static struct page **__atomic_get_pages(void *addr)
+{
+       struct dma_pool *pool = &atomic_pool;
+       struct page **pages = pool->pages;
+       int offs = (addr - pool->vaddr) >> PAGE_SHIFT;
+
+       return pages + offs;
+}
+
 static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
 {
        struct vm_struct *area;
 
+       if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
+               return __atomic_get_pages(cpu_addr);
+
        if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
                return cpu_addr;
 
@@ -1103,6 +1161,34 @@ static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
        return NULL;
 }
 
+static void *__iommu_alloc_atomic(struct device *dev, size_t size,
+                                 dma_addr_t *handle)
+{
+       struct page *page;
+       void *addr;
+
+       addr = __alloc_from_pool(size, &page);
+       if (!addr)
+               return NULL;
+
+       *handle = __iommu_create_mapping(dev, &page, size);
+       if (*handle == DMA_ERROR_CODE)
+               goto err_mapping;
+
+       return addr;
+
+err_mapping:
+       __free_from_pool(addr, size);
+       return NULL;
+}
+
+static void __iommu_free_atomic(struct device *dev, struct page **pages,
+                               dma_addr_t handle, size_t size)
+{
+       __iommu_remove_mapping(dev, handle, size);
+       __free_from_pool(page_address(pages[0]), size);
+}
+
 static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
            dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
 {
@@ -1113,6 +1199,9 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
        *handle = DMA_ERROR_CODE;
        size = PAGE_ALIGN(size);
 
+       if (gfp & GFP_ATOMIC)
+               return __iommu_alloc_atomic(dev, size, handle);
+
        pages = __iommu_alloc_buffer(dev, size, gfp);
        if (!pages)
                return NULL;
@@ -1179,6 +1268,11 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
                return;
        }
 
+       if (__in_atomic_pool(cpu_addr, size)) {
+               __iommu_free_atomic(dev, pages, handle, size);
+               return;
+       }
+
        if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
                unmap_kernel_range((unsigned long)cpu_addr, size);
                vunmap(cpu_addr);
index 6776160618ef0ede79d07b4f6a0873c30df2a1d0..a8ee92da3544926138f68116fbddbc2c55b64dbf 100644 (file)
@@ -55,6 +55,9 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page
 /* permanent static mappings from iotable_init() */
 #define VM_ARM_STATIC_MAPPING  0x40000000
 
+/* empty mapping */
+#define VM_ARM_EMPTY_MAPPING   0x20000000
+
 /* mapping type (attributes) for permanent static mappings */
 #define VM_ARM_MTYPE(mt)               ((mt) << 20)
 #define VM_ARM_MTYPE_MASK      (0x1f << 20)
index 4c2d0451e84af1c2a0347a6fe462dd2e3306db3e..c2fa21d0103e0348f2b1f48c886aa11d63809dad 100644 (file)
@@ -807,7 +807,7 @@ static void __init pmd_empty_section_gap(unsigned long addr)
        vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
        vm->addr = (void *)addr;
        vm->size = SECTION_SIZE;
-       vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
+       vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
        vm->caller = pmd_empty_section_gap;
        vm_area_add_early(vm);
 }
@@ -820,7 +820,7 @@ static void __init fill_pmd_gaps(void)
 
        /* we're still single threaded hence no lock needed here */
        for (vm = vmlist; vm; vm = vm->next) {
-               if (!(vm->flags & VM_ARM_STATIC_MAPPING))
+               if (!(vm->flags & (VM_ARM_STATIC_MAPPING | VM_ARM_EMPTY_MAPPING)))
                        continue;
                addr = (unsigned long)vm->addr;
                if (addr < next)
@@ -961,8 +961,8 @@ void __init sanity_check_meminfo(void)
                 * Check whether this memory bank would partially overlap
                 * the vmalloc area.
                 */
-               if (__va(bank->start + bank->size) > vmalloc_min ||
-                   __va(bank->start + bank->size) < __va(bank->start)) {
+               if (__va(bank->start + bank->size - 1) >= vmalloc_min ||
+                   __va(bank->start + bank->size - 1) <= __va(bank->start)) {
                        unsigned long newsize = vmalloc_min - __va(bank->start);
                        printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx "
                               "to -%.8llx (vmalloc region overlap).\n",
index 766181cb5c95c277b8495966835571059a36dda5..024f3b08db29b0046a58457120259e7b5366a1ca 100644 (file)
@@ -68,6 +68,7 @@
 
 static unsigned long omap_sram_start;
 static void __iomem *omap_sram_base;
+static unsigned long omap_sram_skip;
 static unsigned long omap_sram_size;
 static void __iomem *omap_sram_ceil;
 
@@ -106,6 +107,7 @@ static int is_sram_locked(void)
  */
 static void __init omap_detect_sram(void)
 {
+       omap_sram_skip = SRAM_BOOTLOADER_SZ;
        if (cpu_class_is_omap2()) {
                if (is_sram_locked()) {
                        if (cpu_is_omap34xx()) {
@@ -113,6 +115,7 @@ static void __init omap_detect_sram(void)
                                if ((omap_type() == OMAP2_DEVICE_TYPE_EMU) ||
                                    (omap_type() == OMAP2_DEVICE_TYPE_SEC)) {
                                        omap_sram_size = 0x7000; /* 28K */
+                                       omap_sram_skip += SZ_16K;
                                } else {
                                        omap_sram_size = 0x8000; /* 32K */
                                }
@@ -175,8 +178,10 @@ static void __init omap_map_sram(void)
                return;
 
 #ifdef CONFIG_OMAP4_ERRATA_I688
+       if (cpu_is_omap44xx()) {
                omap_sram_start += PAGE_SIZE;
                omap_sram_size -= SZ_16K;
+       }
 #endif
        if (cpu_is_omap34xx()) {
                /*
@@ -203,8 +208,8 @@ static void __init omap_map_sram(void)
         * Looks like we need to preserve some bootloader code at the
         * beginning of SRAM for jumping to flash for reboot to work...
         */
-       memset_io(omap_sram_base + SRAM_BOOTLOADER_SZ, 0,
-                 omap_sram_size - SRAM_BOOTLOADER_SZ);
+       memset_io(omap_sram_base + omap_sram_skip, 0,
+                 omap_sram_size - omap_sram_skip);
 }
 
 /*
@@ -218,7 +223,7 @@ void *omap_sram_push_address(unsigned long size)
 {
        unsigned long available, new_ceil = (unsigned long)omap_sram_ceil;
 
-       available = omap_sram_ceil - (omap_sram_base + SRAM_BOOTLOADER_SZ);
+       available = omap_sram_ceil - (omap_sram_base + omap_sram_skip);
 
        if (size > available) {
                pr_err("Not enough space in SRAM\n");
index f34861920634d15c9de2b1149aa8562a767e64f1..c7092e6057c56e4fa856362263ed4ac49a95d32e 100644 (file)
@@ -38,6 +38,7 @@ config BLACKFIN
        select GENERIC_ATOMIC64
        select GENERIC_IRQ_PROBE
        select IRQ_PER_CPU if SMP
+       select USE_GENERIC_SMP_HELPERS if SMP
        select HAVE_NMI_WATCHDOG if NMI_WATCHDOG
        select GENERIC_SMP_IDLE_THREAD
        select ARCH_USES_GETTIMEOFFSET if !GENERIC_CLOCKEVENTS
index d3d7e64ca96dacfa41bc298733ef3ed2076c7d3e..66cf00095b8487210b3187cbf41072bc3d80406b 100644 (file)
@@ -20,7 +20,6 @@ endif
 KBUILD_AFLAGS           += $(call cc-option,-mno-fdpic)
 KBUILD_CFLAGS_MODULE    += -mlong-calls
 LDFLAGS                 += -m elf32bfin
-KALLSYMS         += --symbol-prefix=_
 
 KBUILD_DEFCONFIG := BF537-STAMP_defconfig
 
index dc3d144b4bb5930a396f73d9bce48f67ca1c6365..9631598dcc5d120321febba77a85b6309bbdea7b 100644 (file)
@@ -18,6 +18,8 @@
 #define raw_smp_processor_id()  blackfin_core_id()
 
 extern void bfin_relocate_coreb_l1_mem(void);
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 
 #if defined(CONFIG_SMP) && defined(CONFIG_ICACHE_FLUSH_L1)
 asmlinkage void blackfin_icache_flush_range_l1(unsigned long *ptr);
index 00bbe672b3b308433445d39461637f3a0d396573..a40151306b77ff301b9b152b205795a6fece4b9c 100644 (file)
@@ -48,10 +48,13 @@ unsigned long blackfin_iflush_l1_entry[NR_CPUS];
 
 struct blackfin_initial_pda __cpuinitdata initial_pda_coreb;
 
-#define BFIN_IPI_TIMER       0
-#define BFIN_IPI_RESCHEDULE   1
-#define BFIN_IPI_CALL_FUNC    2
-#define BFIN_IPI_CPU_STOP     3
+enum ipi_message_type {
+       BFIN_IPI_TIMER,
+       BFIN_IPI_RESCHEDULE,
+       BFIN_IPI_CALL_FUNC,
+       BFIN_IPI_CALL_FUNC_SINGLE,
+       BFIN_IPI_CPU_STOP,
+};
 
 struct blackfin_flush_data {
        unsigned long start;
@@ -60,35 +63,20 @@ struct blackfin_flush_data {
 
 void *secondary_stack;
 
-
-struct smp_call_struct {
-       void (*func)(void *info);
-       void *info;
-       int wait;
-       cpumask_t *waitmask;
-};
-
 static struct blackfin_flush_data smp_flush_data;
 
 static DEFINE_SPINLOCK(stop_lock);
 
-struct ipi_message {
-       unsigned long type;
-       struct smp_call_struct call_struct;
-};
-
 /* A magic number - stress test shows this is safe for common cases */
 #define BFIN_IPI_MSGQ_LEN 5
 
 /* Simple FIFO buffer, overflow leads to panic */
-struct ipi_message_queue {
-       spinlock_t lock;
+struct ipi_data {
        unsigned long count;
-       unsigned long head; /* head of the queue */
-       struct ipi_message ipi_message[BFIN_IPI_MSGQ_LEN];
+       unsigned long bits;
 };
 
-static DEFINE_PER_CPU(struct ipi_message_queue, ipi_msg_queue);
+static DEFINE_PER_CPU(struct ipi_data, bfin_ipi);
 
 static void ipi_cpu_stop(unsigned int cpu)
 {
@@ -129,28 +117,6 @@ static void ipi_flush_icache(void *info)
        blackfin_icache_flush_range(fdata->start, fdata->end);
 }
 
-static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
-{
-       int wait;
-       void (*func)(void *info);
-       void *info;
-       func = msg->call_struct.func;
-       info = msg->call_struct.info;
-       wait = msg->call_struct.wait;
-       func(info);
-       if (wait) {
-#ifdef __ARCH_SYNC_CORE_DCACHE
-               /*
-                * 'wait' usually means synchronization between CPUs.
-                * Invalidate D cache in case shared data was changed
-                * by func() to ensure cache coherence.
-                */
-               resync_core_dcache();
-#endif
-               cpumask_clear_cpu(cpu, msg->call_struct.waitmask);
-       }
-}
-
 /* Use IRQ_SUPPLE_0 to request reschedule.
  * When returning from interrupt to user space,
  * there is chance to reschedule */
@@ -172,152 +138,95 @@ void ipi_timer(void)
 
 static irqreturn_t ipi_handler_int1(int irq, void *dev_instance)
 {
-       struct ipi_message *msg;
-       struct ipi_message_queue *msg_queue;
+       struct ipi_data *bfin_ipi_data;
        unsigned int cpu = smp_processor_id();
-       unsigned long flags;
+       unsigned long pending;
+       unsigned long msg;
 
        platform_clear_ipi(cpu, IRQ_SUPPLE_1);
 
-       msg_queue = &__get_cpu_var(ipi_msg_queue);
-
-       spin_lock_irqsave(&msg_queue->lock, flags);
-
-       while (msg_queue->count) {
-               msg = &msg_queue->ipi_message[msg_queue->head];
-               switch (msg->type) {
-               case BFIN_IPI_TIMER:
-                       ipi_timer();
-                       break;
-               case BFIN_IPI_RESCHEDULE:
-                       scheduler_ipi();
-                       break;
-               case BFIN_IPI_CALL_FUNC:
-                       ipi_call_function(cpu, msg);
-                       break;
-               case BFIN_IPI_CPU_STOP:
-                       ipi_cpu_stop(cpu);
-                       break;
-               default:
-                       printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%lx\n",
-                              cpu, msg->type);
-                       break;
-               }
-               msg_queue->head++;
-               msg_queue->head %= BFIN_IPI_MSGQ_LEN;
-               msg_queue->count--;
+       bfin_ipi_data = &__get_cpu_var(bfin_ipi);
+
+       while ((pending = xchg(&bfin_ipi_data->bits, 0)) != 0) {
+               msg = 0;
+               do {
+                       msg = find_next_bit(&pending, BITS_PER_LONG, msg + 1);
+                       switch (msg) {
+                       case BFIN_IPI_TIMER:
+                               ipi_timer();
+                               break;
+                       case BFIN_IPI_RESCHEDULE:
+                               scheduler_ipi();
+                               break;
+                       case BFIN_IPI_CALL_FUNC:
+                               generic_smp_call_function_interrupt();
+                               break;
+
+                       case BFIN_IPI_CALL_FUNC_SINGLE:
+                               generic_smp_call_function_single_interrupt();
+                               break;
+
+                       case BFIN_IPI_CPU_STOP:
+                               ipi_cpu_stop(cpu);
+                               break;
+                       }
+               } while (msg < BITS_PER_LONG);
+
+               smp_mb();
        }
-       spin_unlock_irqrestore(&msg_queue->lock, flags);
        return IRQ_HANDLED;
 }
 
-static void ipi_queue_init(void)
+static void bfin_ipi_init(void)
 {
        unsigned int cpu;
-       struct ipi_message_queue *msg_queue;
+       struct ipi_data *bfin_ipi_data;
        for_each_possible_cpu(cpu) {
-               msg_queue = &per_cpu(ipi_msg_queue, cpu);
-               spin_lock_init(&msg_queue->lock);
-               msg_queue->count = 0;
-               msg_queue->head = 0;
+               bfin_ipi_data = &per_cpu(bfin_ipi, cpu);
+               bfin_ipi_data->bits = 0;
+               bfin_ipi_data->count = 0;
        }
 }
 
-static inline void smp_send_message(cpumask_t callmap, unsigned long type,
-                                       void (*func) (void *info), void *info, int wait)
+void send_ipi(const struct cpumask *cpumask, enum ipi_message_type msg)
 {
        unsigned int cpu;
-       struct ipi_message_queue *msg_queue;
-       struct ipi_message *msg;
-       unsigned long flags, next_msg;
-       cpumask_t waitmask; /* waitmask is shared by all cpus */
-
-       cpumask_copy(&waitmask, &callmap);
-       for_each_cpu(cpu, &callmap) {
-               msg_queue = &per_cpu(ipi_msg_queue, cpu);
-               spin_lock_irqsave(&msg_queue->lock, flags);
-               if (msg_queue->count < BFIN_IPI_MSGQ_LEN) {
-                       next_msg = (msg_queue->head + msg_queue->count)
-                                       % BFIN_IPI_MSGQ_LEN;
-                       msg = &msg_queue->ipi_message[next_msg];
-                       msg->type = type;
-                       if (type == BFIN_IPI_CALL_FUNC) {
-                               msg->call_struct.func = func;
-                               msg->call_struct.info = info;
-                               msg->call_struct.wait = wait;
-                               msg->call_struct.waitmask = &waitmask;
-                       }
-                       msg_queue->count++;
-               } else
-                       panic("IPI message queue overflow\n");
-               spin_unlock_irqrestore(&msg_queue->lock, flags);
+       struct ipi_data *bfin_ipi_data;
+       unsigned long flags;
+
+       local_irq_save(flags);
+
+       for_each_cpu(cpu, cpumask) {
+               bfin_ipi_data = &per_cpu(bfin_ipi, cpu);
+               smp_mb();
+               set_bit(msg, &bfin_ipi_data->bits);
+               bfin_ipi_data->count++;
                platform_send_ipi_cpu(cpu, IRQ_SUPPLE_1);
        }
 
-       if (wait) {
-               while (!cpumask_empty(&waitmask))
-                       blackfin_dcache_invalidate_range(
-                               (unsigned long)(&waitmask),
-                               (unsigned long)(&waitmask));
-#ifdef __ARCH_SYNC_CORE_DCACHE
-               /*
-                * Invalidate D cache in case shared data was changed by
-                * other processors to ensure cache coherence.
-                */
-               resync_core_dcache();
-#endif
-       }
+       local_irq_restore(flags);
 }
 
-int smp_call_function(void (*func)(void *info), void *info, int wait)
+void arch_send_call_function_single_ipi(int cpu)
 {
-       cpumask_t callmap;
-
-       preempt_disable();
-       cpumask_copy(&callmap, cpu_online_mask);
-       cpumask_clear_cpu(smp_processor_id(), &callmap);
-       if (!cpumask_empty(&callmap))
-               smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);
-
-       preempt_enable();
-
-       return 0;
+       send_ipi(cpumask_of(cpu), BFIN_IPI_CALL_FUNC_SINGLE);
 }
-EXPORT_SYMBOL_GPL(smp_call_function);
 
-int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
-                               int wait)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
-       unsigned int cpu = cpuid;
-       cpumask_t callmap;
-
-       if (cpu_is_offline(cpu))
-               return 0;
-       cpumask_clear(&callmap);
-       cpumask_set_cpu(cpu, &callmap);
-
-       smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);
-
-       return 0;
+       send_ipi(mask, BFIN_IPI_CALL_FUNC);
 }
-EXPORT_SYMBOL_GPL(smp_call_function_single);
 
 void smp_send_reschedule(int cpu)
 {
-       cpumask_t callmap;
-       /* simply trigger an ipi */
-
-       cpumask_clear(&callmap);
-       cpumask_set_cpu(cpu, &callmap);
-
-       smp_send_message(callmap, BFIN_IPI_RESCHEDULE, NULL, NULL, 0);
+       send_ipi(cpumask_of(cpu), BFIN_IPI_RESCHEDULE);
 
        return;
 }
 
 void smp_send_msg(const struct cpumask *mask, unsigned long type)
 {
-       smp_send_message(*mask, type, NULL, NULL, 0);
+       send_ipi(mask, type);
 }
 
 void smp_timer_broadcast(const struct cpumask *mask)
@@ -333,7 +242,7 @@ void smp_send_stop(void)
        cpumask_copy(&callmap, cpu_online_mask);
        cpumask_clear_cpu(smp_processor_id(), &callmap);
        if (!cpumask_empty(&callmap))
-               smp_send_message(callmap, BFIN_IPI_CPU_STOP, NULL, NULL, 0);
+               send_ipi(&callmap, BFIN_IPI_CPU_STOP);
 
        preempt_enable();
 
@@ -436,7 +345,7 @@ void __init smp_prepare_boot_cpu(void)
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
        platform_prepare_cpus(max_cpus);
-       ipi_queue_init();
+       bfin_ipi_init();
        platform_request_ipi(IRQ_SUPPLE_0, ipi_handler_int0);
        platform_request_ipi(IRQ_SUPPLE_1, ipi_handler_int1);
 }
index 53b6dfa83344a9f3dbe6b68ef72c6d60948f26ab..54b73a28c20579f4f6923e4f9711b5d7dc062205 100644 (file)
@@ -386,6 +386,7 @@ extern unsigned long cpuidle_disable;
 enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_POWERSAVE_OFF};
 
 extern int powersave_nap;      /* set if nap mode can be used in idle loop */
+extern void power7_nap(void);
 
 #ifdef CONFIG_PSERIES_IDLE
 extern void update_smt_snooze_delay(int snooze);
index 85b05c463fae9077b4490bacfa80c31050a617fa..e8995727b1c15207757969ca9e4cd5b7990c2d17 100644 (file)
@@ -76,6 +76,7 @@ int main(void)
        DEFINE(SIGSEGV, SIGSEGV);
        DEFINE(NMI_MASK, NMI_MASK);
        DEFINE(THREAD_DSCR, offsetof(struct thread_struct, dscr));
+       DEFINE(THREAD_DSCR_INHERIT, offsetof(struct thread_struct, dscr_inherit));
 #else
        DEFINE(THREAD_INFO, offsetof(struct task_struct, stack));
 #endif /* CONFIG_PPC64 */
index 5b25c8060fd67a8ff1ec2ff290af5ec740b6376f..a892680668d86ce53d3d8e405c807d968c701a92 100644 (file)
@@ -28,6 +28,8 @@ void doorbell_setup_this_cpu(void)
 
 void doorbell_cause_ipi(int cpu, unsigned long data)
 {
+       /* Order previous accesses vs. msgsnd, which is treated as a store */
+       mb();
        ppc_msgsnd(PPC_DBELL, 0, data);
 }
 
index 4b01a25e29ef61ea5cdeb07a14939142ba1ecdd9..b40e0b4815b3f5297154fa9c6e8a66fa4ce7c574 100644 (file)
@@ -370,6 +370,12 @@ _GLOBAL(ret_from_fork)
        li      r3,0
        b       syscall_exit
 
+       .section        ".toc","aw"
+DSCR_DEFAULT:
+       .tc dscr_default[TC],dscr_default
+
+       .section        ".text"
+
 /*
  * This routine switches between two different tasks.  The process
  * state of one is saved on its kernel stack.  Then the state
@@ -509,9 +515,6 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
        mr      r1,r8           /* start using new stack pointer */
        std     r7,PACAKSAVE(r13)
 
-       ld      r6,_CCR(r1)
-       mtcrf   0xFF,r6
-
 #ifdef CONFIG_ALTIVEC
 BEGIN_FTR_SECTION
        ld      r0,THREAD_VRSAVE(r4)
@@ -520,14 +523,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 #endif /* CONFIG_ALTIVEC */
 #ifdef CONFIG_PPC64
 BEGIN_FTR_SECTION
+       lwz     r6,THREAD_DSCR_INHERIT(r4)
+       ld      r7,DSCR_DEFAULT@toc(2)
        ld      r0,THREAD_DSCR(r4)
-       cmpd    r0,r25
-       beq     1f
+       cmpwi   r6,0
+       bne     1f
+       ld      r0,0(r7)
+1:     cmpd    r0,r25
+       beq     2f
        mtspr   SPRN_DSCR,r0
-1:     
+2:
 END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
 #endif
 
+       ld      r6,_CCR(r1)
+       mtcrf   0xFF,r6
+
        /* r3-r13 are destroyed -- Cort */
        REST_8GPRS(14, r1)
        REST_10GPRS(22, r1)
index e894515e77bbfa950c7d3e00cfbaafa07d0a0075..39aa97d3ff883a2ed5121e1cc11afd5ffae0f898 100644 (file)
@@ -186,7 +186,7 @@ hardware_interrupt_hv:
        KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x800)
 
        MASKABLE_EXCEPTION_PSERIES(0x900, 0x900, decrementer)
-       MASKABLE_EXCEPTION_HV(0x980, 0x982, decrementer)
+       STD_EXCEPTION_HV(0x980, 0x982, hdecrementer)
 
        STD_EXCEPTION_PSERIES(0xa00, 0xa00, trap_0a)
        KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xa00)
@@ -486,6 +486,7 @@ machine_check_common:
 
        STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, do_IRQ)
        STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, .timer_interrupt)
+       STD_EXCEPTION_COMMON(0x980, hdecrementer, .hdec_interrupt)
        STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
        STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
        STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
index 7140d838339e1a9e2ef691ff4079ea022f4d964c..e11863f4e595e2a3302e3ead79b439751ca5d402 100644 (file)
@@ -28,7 +28,9 @@ _GLOBAL(power7_idle)
        lwz     r4,ADDROFF(powersave_nap)(r3)
        cmpwi   0,r4,0
        beqlr
+       /* fall through */
 
+_GLOBAL(power7_nap)
        /* NAP is a state loss, we create a regs frame on the
         * stack, fill it up with the state we care about and
         * stick a pointer to it in PACAR1. We really only
index 710f400476deb8461a98a1342c6ee1b669fbe686..1a1f2ddfb581a222fb5fe85a0d8983ef5ad7e2af 100644 (file)
@@ -802,16 +802,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 #endif /* CONFIG_PPC_STD_MMU_64 */
 #ifdef CONFIG_PPC64 
        if (cpu_has_feature(CPU_FTR_DSCR)) {
-               if (current->thread.dscr_inherit) {
-                       p->thread.dscr_inherit = 1;
-                       p->thread.dscr = current->thread.dscr;
-               } else if (0 != dscr_default) {
-                       p->thread.dscr_inherit = 1;
-                       p->thread.dscr = dscr_default;
-               } else {
-                       p->thread.dscr_inherit = 0;
-                       p->thread.dscr = 0;
-               }
+               p->thread.dscr_inherit = current->thread.dscr_inherit;
+               p->thread.dscr = current->thread.dscr;
        }
 #endif
 
index 0321007086f7c499771ad0f71bc1f63969faff7a..8d4214afc21d6934fef8d12b582073c64a6ff7f3 100644 (file)
@@ -198,8 +198,15 @@ void smp_muxed_ipi_message_pass(int cpu, int msg)
        struct cpu_messages *info = &per_cpu(ipi_message, cpu);
        char *message = (char *)&info->messages;
 
+       /*
+        * Order previous accesses before accesses in the IPI handler.
+        */
+       smp_mb();
        message[msg] = 1;
-       mb();
+       /*
+        * cause_ipi functions are required to include a full barrier
+        * before doing whatever causes the IPI.
+        */
        smp_ops->cause_ipi(cpu, info->data);
 }
 
@@ -211,7 +218,7 @@ irqreturn_t smp_ipi_demux(void)
        mb();   /* order any irq clear */
 
        do {
-               all = xchg_local(&info->messages, 0);
+               all = xchg(&info->messages, 0);
 
 #ifdef __BIG_ENDIAN
                if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNCTION)))
index 3529446c2abd78a407f9b3a7fd9735ed49802656..8302af6492195b77ac8a5b7f5dfbc69dfc7d43b8 100644 (file)
@@ -194,6 +194,14 @@ static ssize_t show_dscr_default(struct device *dev,
        return sprintf(buf, "%lx\n", dscr_default);
 }
 
+static void update_dscr(void *dummy)
+{
+       if (!current->thread.dscr_inherit) {
+               current->thread.dscr = dscr_default;
+               mtspr(SPRN_DSCR, dscr_default);
+       }
+}
+
 static ssize_t __used store_dscr_default(struct device *dev,
                struct device_attribute *attr, const char *buf,
                size_t count)
@@ -206,6 +214,8 @@ static ssize_t __used store_dscr_default(struct device *dev,
                return -EINVAL;
        dscr_default = val;
 
+       on_each_cpu(update_dscr, NULL, 1);
+
        return count;
 }
 
index be171ee73bf8cd3bef83630fbc031ad4f29410e7..e49e93191b69a736332e06cf9be56aad189d0ec3 100644 (file)
@@ -535,6 +535,15 @@ void timer_interrupt(struct pt_regs * regs)
        trace_timer_interrupt_exit(regs);
 }
 
+/*
+ * Hypervisor decrementer interrupts shouldn't occur but are sometimes
+ * left pending on exit from a KVM guest.  We don't need to do anything
+ * to clear them, as they are edge-triggered.
+ */
+void hdec_interrupt(struct pt_regs *regs)
+{
+}
+
 #ifdef CONFIG_SUSPEND
 static void generic_suspend_disable_irqs(void)
 {
index 158972341a2d7a66da48c32878fdc0842b2a4feb..ae0843fa7a61f64540214b2fbf658265d24879df 100644 (file)
@@ -972,8 +972,9 @@ static int emulate_instruction(struct pt_regs *regs)
                        cpu_has_feature(CPU_FTR_DSCR)) {
                PPC_WARN_EMULATED(mtdscr, regs);
                rd = (instword >> 21) & 0x1f;
-               mtspr(SPRN_DSCR, regs->gpr[rd]);
+               current->thread.dscr = regs->gpr[rd];
                current->thread.dscr_inherit = 1;
+               mtspr(SPRN_DSCR, current->thread.dscr);
                return 0;
        }
 #endif
index dd223b3eb333ac3446725f0768d5da412000ba75..17e5b23643124347febe56a05f5deb7e1a2e361e 100644 (file)
@@ -20,7 +20,7 @@ int patch_instruction(unsigned int *addr, unsigned int instr)
 {
        int err;
 
-       err = __put_user(instr, addr);
+       __put_user_size(instr, addr, 4, err);
        if (err)
                return err;
        asm ("dcbst 0, %0; sync; icbi 0,%0; sync; isync" : : "r" (addr));
index 39b159751c35ab17d5e3df0dd48282abe5228700..59213cfaeca9f868143261b2a246834c115b6542 100644 (file)
@@ -1436,11 +1436,11 @@ static long vphn_get_associativity(unsigned long cpu,
 
 /*
  * Update the node maps and sysfs entries for each cpu whose home node
- * has changed.
+ * has changed. Returns 1 when the topology has changed, and 0 otherwise.
  */
 int arch_update_cpu_topology(void)
 {
-       int cpu, nid, old_nid;
+       int cpu, nid, old_nid, changed = 0;
        unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
        struct device *dev;
 
@@ -1466,9 +1466,10 @@ int arch_update_cpu_topology(void)
                dev = get_cpu_device(cpu);
                if (dev)
                        kobject_uevent(&dev->kobj, KOBJ_CHANGE);
+               changed = 1;
        }
 
-       return 1;
+       return changed;
 }
 
 static void topology_work_fn(struct work_struct *work)
index 3ef46254c35ba907dfd9329f9164db3176a9d7a4..7698b6e13c57f6312a7505a3610fbf6b27271991 100644 (file)
@@ -106,14 +106,6 @@ static void pnv_smp_cpu_kill_self(void)
 {
        unsigned int cpu;
 
-       /* If powersave_nap is enabled, use NAP mode, else just
-        * spin aimlessly
-        */
-       if (!powersave_nap) {
-               generic_mach_cpu_die();
-               return;
-       }
-
        /* Standard hot unplug procedure */
        local_irq_disable();
        idle_task_exit();
@@ -128,7 +120,7 @@ static void pnv_smp_cpu_kill_self(void)
         */
        mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);
        while (!generic_check_cpu_restart(cpu)) {
-               power7_idle();
+               power7_nap();
                if (!generic_check_cpu_restart(cpu)) {
                        DBG("CPU%d Unexpected exit while offline !\n", cpu);
                        /* We may be getting an IPI, so we re-enable
index 14469cf9df68dc8497408a8953cbbbcf7b360ea1..df0fc58214697623b429f55555d494e8ccf56c52 100644 (file)
@@ -65,7 +65,11 @@ static inline void icp_hv_set_xirr(unsigned int value)
 static inline void icp_hv_set_qirr(int n_cpu , u8 value)
 {
        int hw_cpu = get_hard_smp_processor_id(n_cpu);
-       long rc = plpar_hcall_norets(H_IPI, hw_cpu, value);
+       long rc;
+
+       /* Make sure all previous accesses are ordered before sending the IPI */
+       mb();
+       rc = plpar_hcall_norets(H_IPI, hw_cpu, value);
        if (rc != H_SUCCESS) {
                pr_err("%s: bad return code qirr cpu=%d hw_cpu=%d mfrr=0x%x "
                        "returned %ld\n", __func__, n_cpu, hw_cpu, value, rc);
index a1e9d69a9c90e579c2f68cea8869ae6ec26d05c4..584b93674ea43bceea2259142081ef81a02ad9aa 100644 (file)
@@ -169,7 +169,7 @@ static ssize_t hw_interval_write(struct file *file, char const __user *buf,
        if (*offset)
                return -EINVAL;
        retval = oprofilefs_ulong_from_user(&val, buf, count);
-       if (retval)
+       if (retval <= 0)
                return retval;
        if (val < oprofile_min_interval)
                oprofile_hw_interval = oprofile_min_interval;
@@ -212,7 +212,7 @@ static ssize_t hwsampler_zero_write(struct file *file, char const __user *buf,
                return -EINVAL;
 
        retval = oprofilefs_ulong_from_user(&val, buf, count);
-       if (retval)
+       if (retval <= 0)
                return retval;
        if (val != 0)
                return -EINVAL;
@@ -243,7 +243,7 @@ static ssize_t hwsampler_kernel_write(struct file *file, char const __user *buf,
                return -EINVAL;
 
        retval = oprofilefs_ulong_from_user(&val, buf, count);
-       if (retval)
+       if (retval <= 0)
                return retval;
 
        if (val != 0 && val != 1)
@@ -278,7 +278,7 @@ static ssize_t hwsampler_user_write(struct file *file, char const __user *buf,
                return -EINVAL;
 
        retval = oprofilefs_ulong_from_user(&val, buf, count);
-       if (retval)
+       if (retval <= 0)
                return retval;
 
        if (val != 0 && val != 1)
@@ -317,7 +317,7 @@ static ssize_t timer_enabled_write(struct file *file, char const __user *buf,
                return -EINVAL;
 
        retval = oprofilefs_ulong_from_user(&val, buf, count);
-       if (retval)
+       if (retval <= 0)
                return retval;
 
        if (val != 0 && val != 1)
index f60238559af309e00b031eedb17ad02ec77516d8..0748fe0c8a73dfa07aad69af8db1e4dcf5194550 100644 (file)
@@ -114,7 +114,7 @@ static void deliver_alarm(void)
        skew += this_tick - last_tick;
 
        while (skew >= one_tick) {
-               alarm_handler(SIGVTALRM, NULL);
+               alarm_handler(SIGVTALRM, NULL, NULL);
                skew -= one_tick;
        }
 
index 957ec87385afe0c5c53fbac8afac109e75c374d5..fbee9714d9abafd8f4a95c6586acdefaf1e1bd66 100644 (file)
 
 #define MSR_IA32_PERF_STATUS           0x00000198
 #define MSR_IA32_PERF_CTL              0x00000199
+#define MSR_AMD_PSTATE_DEF_BASE                0xc0010064
+#define MSR_AMD_PERF_STATUS            0xc0010063
+#define MSR_AMD_PERF_CTL               0xc0010062
 
 #define MSR_IA32_MPERF                 0x000000e7
 #define MSR_IA32_APERF                 0x000000e8
index 7f2739e03e79a80fc1baaf203cf3a22eccec54dc..0d3d63afa76abd304cb7809461152ce97e3f0828 100644 (file)
@@ -2008,6 +2008,7 @@ __init int intel_pmu_init(void)
                break;
 
        case 28: /* Atom */
+       case 54: /* Cedarview */
                memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
 
index 520b4265fcd215ee5afe240fe11c944dd6bc06aa..da02e9cc3754b4a2c1a37c1edb44865143f7f723 100644 (file)
@@ -686,7 +686,8 @@ void intel_pmu_lbr_init_atom(void)
         * to have an operational LBR which can freeze
         * on PMU interrupt
         */
-       if (boot_cpu_data.x86_mask < 10) {
+       if (boot_cpu_data.x86_model == 28
+           && boot_cpu_data.x86_mask < 10) {
                pr_cont("LBR disabled due to erratum");
                return;
        }
index 4873e62db6a18468b23736c5f4adfd2de8b3b85b..9e5bcf1e2376e9713adc8841df162293c859c97c 100644 (file)
@@ -225,6 +225,9 @@ static ssize_t microcode_write(struct file *file, const char __user *buf,
        if (do_microcode_update(buf, len) == 0)
                ret = (ssize_t)len;
 
+       if (ret > 0)
+               perf_check_microcode();
+
        mutex_unlock(&microcode_mutex);
        put_online_cpus();
 
index e498b18f010c7b97480ccf1f1018c87a5f07daa1..9fc9aa7ac7034c64dc8cdb59f27f5ac80e1d77ab 100644 (file)
@@ -318,7 +318,7 @@ static void pic_ioport_write(void *opaque, u32 addr, u32 val)
                if (val & 0x10) {
                        u8 edge_irr = s->irr & ~s->elcr;
                        int i;
-                       bool found;
+                       bool found = false;
                        struct kvm_vcpu *vcpu;
 
                        s->init4 = val & 1;
index c00f03de1b794af8fe65387747813d57ed2885a2..b1eb202ee76a9265d920ca1b02d57aa1ff803d9a 100644 (file)
@@ -3619,6 +3619,7 @@ static void seg_setup(int seg)
 
 static int alloc_apic_access_page(struct kvm *kvm)
 {
+       struct page *page;
        struct kvm_userspace_memory_region kvm_userspace_mem;
        int r = 0;
 
@@ -3633,7 +3634,13 @@ static int alloc_apic_access_page(struct kvm *kvm)
        if (r)
                goto out;
 
-       kvm->arch.apic_access_page = gfn_to_page(kvm, 0xfee00);
+       page = gfn_to_page(kvm, 0xfee00);
+       if (is_error_page(page)) {
+               r = -EFAULT;
+               goto out;
+       }
+
+       kvm->arch.apic_access_page = page;
 out:
        mutex_unlock(&kvm->slots_lock);
        return r;
@@ -3641,6 +3648,7 @@ out:
 
 static int alloc_identity_pagetable(struct kvm *kvm)
 {
+       struct page *page;
        struct kvm_userspace_memory_region kvm_userspace_mem;
        int r = 0;
 
@@ -3656,8 +3664,13 @@ static int alloc_identity_pagetable(struct kvm *kvm)
        if (r)
                goto out;
 
-       kvm->arch.ept_identity_pagetable = gfn_to_page(kvm,
-                       kvm->arch.ept_identity_map_addr >> PAGE_SHIFT);
+       page = gfn_to_page(kvm, kvm->arch.ept_identity_map_addr >> PAGE_SHIFT);
+       if (is_error_page(page)) {
+               r = -EFAULT;
+               goto out;
+       }
+
+       kvm->arch.ept_identity_pagetable = page;
 out:
        mutex_unlock(&kvm->slots_lock);
        return r;
@@ -6575,7 +6588,7 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
        /* Exposing INVPCID only when PCID is exposed */
        best = kvm_find_cpuid_entry(vcpu, 0x7, 0);
        if (vmx_invpcid_supported() &&
-           best && (best->ecx & bit(X86_FEATURE_INVPCID)) &&
+           best && (best->ebx & bit(X86_FEATURE_INVPCID)) &&
            guest_cpuid_has_pcid(vcpu)) {
                exec_control |= SECONDARY_EXEC_ENABLE_INVPCID;
                vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
@@ -6585,7 +6598,7 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
                vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
                             exec_control);
                if (best)
-                       best->ecx &= ~bit(X86_FEATURE_INVPCID);
+                       best->ebx &= ~bit(X86_FEATURE_INVPCID);
        }
 }
 
index 148ed666e311fda2979887d6c9e9a2d440f75ffc..2966c847d489d84f1f2b3cb8450daf4bcce1d568 100644 (file)
@@ -5113,17 +5113,20 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu)
                        !kvm_event_needs_reinjection(vcpu);
 }
 
-static void vapic_enter(struct kvm_vcpu *vcpu)
+static int vapic_enter(struct kvm_vcpu *vcpu)
 {
        struct kvm_lapic *apic = vcpu->arch.apic;
        struct page *page;
 
        if (!apic || !apic->vapic_addr)
-               return;
+               return 0;
 
        page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
+       if (is_error_page(page))
+               return -EFAULT;
 
        vcpu->arch.apic->vapic_page = page;
+       return 0;
 }
 
 static void vapic_exit(struct kvm_vcpu *vcpu)
@@ -5430,7 +5433,11 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
        }
 
        vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
-       vapic_enter(vcpu);
+       r = vapic_enter(vcpu);
+       if (r) {
+               srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
+               return r;
+       }
 
        r = 1;
        while (r > 0) {
index b65a76133f4f9b4f51dc426021975d7a5427191e..5141d808e7519d8f38ca8c001e4e5c6c7b318956 100644 (file)
@@ -1283,7 +1283,7 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,
        cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
 
        args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
-       if (start != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) {
+       if (end != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) {
                args->op.cmd = MMUEXT_INVLPG_MULTI;
                args->op.arg1.linear_addr = start;
        }
index d4b255463253c8b3bb824a915f5764cc2992e202..76ba0e97e530131199e40aec0730554ae7b7825c 100644 (file)
@@ -599,7 +599,7 @@ bool __init early_can_reuse_p2m_middle(unsigned long set_pfn, unsigned long set_
        if (p2m_index(set_pfn))
                return false;
 
-       for (pfn = 0; pfn <= MAX_DOMAIN_PAGES; pfn += P2M_PER_PAGE) {
+       for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_PER_PAGE) {
                topidx = p2m_top_index(pfn);
 
                if (!p2m_top[topidx])
index 5ef7ba6b6a76a3251956329ffd9ebcfe85ce66b9..d0583a4489e60ee59a6cc0950d7342594a4d5780 100644 (file)
@@ -336,7 +336,7 @@ static int crypto_authenc_genicv(struct aead_request *req, u8 *iv,
                cryptlen += ivsize;
        }
 
-       if (sg_is_last(assoc)) {
+       if (req->assoclen && sg_is_last(assoc)) {
                authenc_ahash_fn = crypto_authenc_ahash;
                sg_init_table(asg, 2);
                sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset);
@@ -490,7 +490,7 @@ static int crypto_authenc_iverify(struct aead_request *req, u8 *iv,
                cryptlen += ivsize;
        }
 
-       if (sg_is_last(assoc)) {
+       if (req->assoclen && sg_is_last(assoc)) {
                authenc_ahash_fn = crypto_authenc_ahash;
                sg_init_table(asg, 2);
                sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset);
index 9628652e080c590fb3f6e01dedc655584c12737b..e0596954290b8e33e20791effc0227b80fb4a0c9 100644 (file)
@@ -237,6 +237,16 @@ static int __acpi_bus_get_power(struct acpi_device *device, int *state)
        } else if (result == ACPI_STATE_D3_HOT) {
                result = ACPI_STATE_D3;
        }
+
+       /*
+        * If we were unsure about the device parent's power state up to this
+        * point, the fact that the device is in D0 implies that the parent has
+        * to be in D0 too.
+        */
+       if (device->parent && device->parent->power.state == ACPI_STATE_UNKNOWN
+           && result == ACPI_STATE_D0)
+               device->parent->power.state = ACPI_STATE_D0;
+
        *state = result;
 
  out:
index fc1803414629d8233b0d6f11819efd166838952e..40e38a06ba854fc04751ec2386ca334d01998eb8 100644 (file)
@@ -107,6 +107,7 @@ struct acpi_power_resource {
 
        /* List of devices relying on this power resource */
        struct acpi_power_resource_device *devices;
+       struct mutex devices_lock;
 };
 
 static struct list_head acpi_power_resource_list;
@@ -225,7 +226,6 @@ static void acpi_power_on_device(struct acpi_power_managed_device *device)
 
 static int __acpi_power_on(struct acpi_power_resource *resource)
 {
-       struct acpi_power_resource_device *device_list = resource->devices;
        acpi_status status = AE_OK;
 
        status = acpi_evaluate_object(resource->device->handle, "_ON", NULL, NULL);
@@ -238,19 +238,15 @@ static int __acpi_power_on(struct acpi_power_resource *resource)
        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Power resource [%s] turned on\n",
                          resource->name));
 
-       while (device_list) {
-               acpi_power_on_device(device_list->device);
-
-               device_list = device_list->next;
-       }
-
        return 0;
 }
 
 static int acpi_power_on(acpi_handle handle)
 {
        int result = 0;
+       bool resume_device = false;
        struct acpi_power_resource *resource = NULL;
+       struct acpi_power_resource_device *device_list;
 
        result = acpi_power_get_context(handle, &resource);
        if (result)
@@ -266,10 +262,25 @@ static int acpi_power_on(acpi_handle handle)
                result = __acpi_power_on(resource);
                if (result)
                        resource->ref_count--;
+               else
+                       resume_device = true;
        }
 
        mutex_unlock(&resource->resource_lock);
 
+       if (!resume_device)
+               return result;
+
+       mutex_lock(&resource->devices_lock);
+
+       device_list = resource->devices;
+       while (device_list) {
+               acpi_power_on_device(device_list->device);
+               device_list = device_list->next;
+       }
+
+       mutex_unlock(&resource->devices_lock);
+
        return result;
 }
 
@@ -355,7 +366,7 @@ static void __acpi_power_resource_unregister_device(struct device *dev,
        if (acpi_power_get_context(res_handle, &resource))
                return;
 
-       mutex_lock(&resource->resource_lock);
+       mutex_lock(&resource->devices_lock);
        prev = NULL;
        curr = resource->devices;
        while (curr) {
@@ -372,7 +383,7 @@ static void __acpi_power_resource_unregister_device(struct device *dev,
                prev = curr;
                curr = curr->next;
        }
-       mutex_unlock(&resource->resource_lock);
+       mutex_unlock(&resource->devices_lock);
 }
 
 /* Unlink dev from all power resources in _PR0 */
@@ -414,10 +425,10 @@ static int __acpi_power_resource_register_device(
 
        power_resource_device->device = powered_device;
 
-       mutex_lock(&resource->resource_lock);
+       mutex_lock(&resource->devices_lock);
        power_resource_device->next = resource->devices;
        resource->devices = power_resource_device;
-       mutex_unlock(&resource->resource_lock);
+       mutex_unlock(&resource->devices_lock);
 
        return 0;
 }
@@ -462,7 +473,7 @@ int acpi_power_resource_register_device(struct device *dev, acpi_handle handle)
        return ret;
 
 no_power_resource:
-       printk(KERN_WARNING PREFIX "Invalid Power Resource to register!");
+       printk(KERN_DEBUG PREFIX "Invalid Power Resource to register!");
        return -ENODEV;
 }
 EXPORT_SYMBOL_GPL(acpi_power_resource_register_device);
@@ -721,6 +732,7 @@ static int acpi_power_add(struct acpi_device *device)
 
        resource->device = device;
        mutex_init(&resource->resource_lock);
+       mutex_init(&resource->devices_lock);
        strcpy(resource->name, device->pnp.bus_id);
        strcpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME);
        strcpy(acpi_device_class(device), ACPI_POWER_CLASS);
index bfc31cb0dd3eac96292af65535c2ee4554cdbf10..e78c2a52ea46665fe3ea31a7e79ba7f5cc5c38b2 100644 (file)
@@ -475,7 +475,7 @@ static __ref int acpi_processor_start(struct acpi_processor *pr)
        acpi_processor_get_limit_info(pr);
 
        if (!cpuidle_get_driver() || cpuidle_get_driver() == &acpi_idle_driver)
-               acpi_processor_power_init(pr, device);
+               acpi_processor_power_init(pr);
 
        pr->cdev = thermal_cooling_device_register("Processor", device,
                                                   &processor_cooling_ops);
@@ -509,7 +509,7 @@ err_remove_sysfs_thermal:
 err_thermal_unregister:
        thermal_cooling_device_unregister(pr->cdev);
 err_power_exit:
-       acpi_processor_power_exit(pr, device);
+       acpi_processor_power_exit(pr);
 
        return result;
 }
@@ -620,7 +620,7 @@ static int acpi_processor_remove(struct acpi_device *device, int type)
                        return -EINVAL;
        }
 
-       acpi_processor_power_exit(pr, device);
+       acpi_processor_power_exit(pr);
 
        sysfs_remove_link(&device->dev.kobj, "sysdev");
 
@@ -905,8 +905,6 @@ static int __init acpi_processor_init(void)
        if (acpi_disabled)
                return 0;
 
-       memset(&errata, 0, sizeof(errata));
-
        result = acpi_bus_register_driver(&acpi_processor_driver);
        if (result < 0)
                return result;
index ad3730b4038bdfb7b2dc24fb3b396abfdf0a5703..3655ab923812c10501e86cde8b166cb18be70065 100644 (file)
@@ -79,6 +79,8 @@ module_param(bm_check_disable, uint, 0000);
 static unsigned int latency_factor __read_mostly = 2;
 module_param(latency_factor, uint, 0644);
 
+static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device);
+
 static int disabled_by_idle_boot_param(void)
 {
        return boot_option_idle_override == IDLE_POLL ||
@@ -483,8 +485,6 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
                if (obj->type != ACPI_TYPE_INTEGER)
                        continue;
 
-               cx.power = obj->integer.value;
-
                current_count++;
                memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));
 
@@ -1000,7 +1000,7 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr)
        int i, count = CPUIDLE_DRIVER_STATE_START;
        struct acpi_processor_cx *cx;
        struct cpuidle_state_usage *state_usage;
-       struct cpuidle_device *dev = &pr->power.dev;
+       struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);
 
        if (!pr->flags.power_setup_done)
                return -EINVAL;
@@ -1132,6 +1132,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
 int acpi_processor_hotplug(struct acpi_processor *pr)
 {
        int ret = 0;
+       struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);
 
        if (disabled_by_idle_boot_param())
                return 0;
@@ -1147,11 +1148,11 @@ int acpi_processor_hotplug(struct acpi_processor *pr)
                return -ENODEV;
 
        cpuidle_pause_and_lock();
-       cpuidle_disable_device(&pr->power.dev);
+       cpuidle_disable_device(dev);
        acpi_processor_get_power_info(pr);
        if (pr->flags.power) {
                acpi_processor_setup_cpuidle_cx(pr);
-               ret = cpuidle_enable_device(&pr->power.dev);
+               ret = cpuidle_enable_device(dev);
        }
        cpuidle_resume_and_unlock();
 
@@ -1162,6 +1163,7 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
 {
        int cpu;
        struct acpi_processor *_pr;
+       struct cpuidle_device *dev;
 
        if (disabled_by_idle_boot_param())
                return 0;
@@ -1192,7 +1194,8 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
                        _pr = per_cpu(processors, cpu);
                        if (!_pr || !_pr->flags.power_setup_done)
                                continue;
-                       cpuidle_disable_device(&_pr->power.dev);
+                       dev = per_cpu(acpi_cpuidle_device, cpu);
+                       cpuidle_disable_device(dev);
                }
 
                /* Populate Updated C-state information */
@@ -1206,7 +1209,8 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
                        acpi_processor_get_power_info(_pr);
                        if (_pr->flags.power) {
                                acpi_processor_setup_cpuidle_cx(_pr);
-                               cpuidle_enable_device(&_pr->power.dev);
+                               dev = per_cpu(acpi_cpuidle_device, cpu);
+                               cpuidle_enable_device(dev);
                        }
                }
                put_online_cpus();
@@ -1218,11 +1222,11 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
 
 static int acpi_processor_registered;
 
-int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
-                             struct acpi_device *device)
+int __cpuinit acpi_processor_power_init(struct acpi_processor *pr)
 {
        acpi_status status = 0;
        int retval;
+       struct cpuidle_device *dev;
        static int first_run;
 
        if (disabled_by_idle_boot_param())
@@ -1268,11 +1272,18 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
                        printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n",
                                        acpi_idle_driver.name);
                }
+
+               dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+               if (!dev)
+                       return -ENOMEM;
+               per_cpu(acpi_cpuidle_device, pr->id) = dev;
+
+               acpi_processor_setup_cpuidle_cx(pr);
+
                /* Register per-cpu cpuidle_device. Cpuidle driver
                 * must already be registered before registering device
                 */
-               acpi_processor_setup_cpuidle_cx(pr);
-               retval = cpuidle_register_device(&pr->power.dev);
+               retval = cpuidle_register_device(dev);
                if (retval) {
                        if (acpi_processor_registered == 0)
                                cpuidle_unregister_driver(&acpi_idle_driver);
@@ -1283,14 +1294,15 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
        return 0;
 }
 
-int acpi_processor_power_exit(struct acpi_processor *pr,
-                             struct acpi_device *device)
+int acpi_processor_power_exit(struct acpi_processor *pr)
 {
+       struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);
+
        if (disabled_by_idle_boot_param())
                return 0;
 
        if (pr->flags.power) {
-               cpuidle_unregister_device(&pr->power.dev);
+               cpuidle_unregister_device(dev);
                acpi_processor_registered--;
                if (acpi_processor_registered == 0)
                        cpuidle_unregister_driver(&acpi_idle_driver);
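The processor_idle.c changes above replace the cpuidle_device embedded in struct acpi_processor with a per-CPU pointer allocated at registration time. A standalone sketch of the same pattern, with hypothetical names:

	#include <linux/percpu.h>
	#include <linux/slab.h>
	#include <linux/cpuidle.h>

	/* One pointer per possible CPU; NULL until that CPU is registered. */
	static DEFINE_PER_CPU(struct cpuidle_device *, example_cpuidle_dev);

	static int example_register_cpu(unsigned int cpu)
	{
		struct cpuidle_device *dev;
		int ret;

		dev = kzalloc(sizeof(*dev), GFP_KERNEL);
		if (!dev)
			return -ENOMEM;

		dev->cpu = cpu;
		per_cpu(example_cpuidle_dev, cpu) = dev;

		ret = cpuidle_register_device(dev);
		if (ret) {
			per_cpu(example_cpuidle_dev, cpu) = NULL;
			kfree(dev);
		}
		return ret;
	}

	static void example_unregister_cpu(unsigned int cpu)
	{
		struct cpuidle_device *dev = per_cpu(example_cpuidle_dev, cpu);

		cpuidle_unregister_device(dev);
		per_cpu(example_cpuidle_dev, cpu) = NULL;
		kfree(dev);
	}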
index a093dc163a42a8b677d9ccb7b27a61d0427c3f49..836bfe0690422855c80ecfe41895a4fe667bfd14 100644 (file)
@@ -324,6 +324,34 @@ static int acpi_processor_get_performance_control(struct acpi_processor *pr)
        return result;
 }
 
+#ifdef CONFIG_X86
+/*
+ * Some AMDs have 50MHz frequency multiples, but only provide 100MHz rounding
+ * in their ACPI data. Calculate the real values and fix up the _PSS data.
+ */
+static void amd_fixup_frequency(struct acpi_processor_px *px, int i)
+{
+       u32 hi, lo, fid, did;
+       int index = px->control & 0x00000007;
+
+       if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+               return;
+
+       if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10)
+           || boot_cpu_data.x86 == 0x11) {
+               rdmsr(MSR_AMD_PSTATE_DEF_BASE + index, lo, hi);
+               fid = lo & 0x3f;
+               did = (lo >> 6) & 7;
+               if (boot_cpu_data.x86 == 0x10)
+                       px->core_frequency = (100 * (fid + 0x10)) >> did;
+               else
+                       px->core_frequency = (100 * (fid + 8)) >> did;
+       }
+}
+#else
+static void amd_fixup_frequency(struct acpi_processor_px *px, int i) {};
+#endif
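Worked example of the fix-up above (register values invented): on a family 0x10 part reporting fid = 0x0b and did = 1, the real frequency is (100 * (0x0b + 0x10)) >> 1 = (100 * 27) / 2 = 1350 MHz, a 50 MHz step that the 100 MHz-rounded _PSS entry cannot express.

	/* Sketch of the family 0x10 encoding used above; inputs are invented. */
	static unsigned int fam10h_core_freq_mhz(unsigned int fid, unsigned int did)
	{
		return (100 * (fid + 0x10)) >> did;
	}
	/* fam10h_core_freq_mhz(0x0b, 1) == 1350 */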
+
 static int acpi_processor_get_performance_states(struct acpi_processor *pr)
 {
        int result = 0;
@@ -379,6 +407,8 @@ static int acpi_processor_get_performance_states(struct acpi_processor *pr)
                        goto end;
                }
 
+               amd_fixup_frequency(px, i);
+
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "State [%d]: core_frequency[%d] power[%d] transition_latency[%d] bus_master_latency[%d] control[0x%x] status[0x%x]\n",
                                  i,
index 50d5dea0ff599feb19626ff80b8143dff1ae4f6e..7862d17976b7532f48204cbf4210ffb6d4984f97 100644 (file)
@@ -268,6 +268,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
        /* JMicron 360/1/3/5/6, match class to avoid IDE function */
        { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
          PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci_ign_iferr },
+       /* JMicron 362B and 362C have an AHCI function with IDE class code */
+       { PCI_VDEVICE(JMICRON, 0x2362), board_ahci_ign_iferr },
+       { PCI_VDEVICE(JMICRON, 0x236f), board_ahci_ign_iferr },
 
        /* ATI */
        { PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */
@@ -393,6 +396,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
          .driver_data = board_ahci_yes_fbs },                  /* 88se9125 */
        { PCI_DEVICE(0x1b4b, 0x917a),
          .driver_data = board_ahci_yes_fbs },                  /* 88se9172 */
+       { PCI_DEVICE(0x1b4b, 0x9192),
+         .driver_data = board_ahci_yes_fbs },                  /* 88se9172 on some Gigabyte */
        { PCI_DEVICE(0x1b4b, 0x91a3),
          .driver_data = board_ahci_yes_fbs },
 
@@ -400,7 +405,10 @@ static const struct pci_device_id ahci_pci_tbl[] = {
        { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci },   /* PDC42819 */
 
        /* Asmedia */
-       { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci },   /* ASM1061 */
+       { PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci },   /* ASM1060 */
+       { PCI_VDEVICE(ASMEDIA, 0x0602), board_ahci },   /* ASM1060 */
+       { PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci },   /* ASM1061 */
+       { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci },   /* ASM1062 */
 
        /* Generic, PCI class code for AHCI */
        { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
index 78efb0306a446027f7fce1019572902f5aaa805b..34d94c762a1e79415fc00b0fc59caf034a3e186a 100644 (file)
@@ -250,7 +250,7 @@ int __init dma_declare_contiguous(struct device *dev, unsigned long size,
                return -EINVAL;
 
        /* Sanitise input arguments */
-       alignment = PAGE_SIZE << max(MAX_ORDER, pageblock_order);
+       alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
        base = ALIGN(base, alignment);
        size = ALIGN(size, alignment);
        limit &= ~(alignment - 1);
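The one-line change above reflects that the buddy allocator's largest block is order MAX_ORDER - 1, not MAX_ORDER. Assuming the common defaults of 4 KiB pages, MAX_ORDER = 11 and pageblock_order = 10, the old expression forced 8 MiB alignment while the corrected one yields 4 MiB, matching the largest allocation the page allocator can actually satisfy:

	/* Sketch, assuming 4 KiB pages, MAX_ORDER == 11, pageblock_order == 10. */
	unsigned long old_align = PAGE_SIZE << max(MAX_ORDER, pageblock_order);	/* 8 MiB */
	unsigned long new_align = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);	/* 4 MiB */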
index a1a72250258705eb059b6601b086cf9d296b1d1b..d51514b79efedc4ad446e5ce6dd87a8ee9d2fb6d 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/pm_runtime.h>
 
 #include "base.h"
+#include "power/power.h"
 
 #define to_platform_driver(drv)        (container_of((drv), struct platform_driver, \
                                 driver))
@@ -948,6 +949,7 @@ void __init early_platform_add_devices(struct platform_device **devs, int num)
                dev = &devs[i]->dev;
 
                if (!dev->devres_head.next) {
+                       pm_runtime_early_init(dev);
                        INIT_LIST_HEAD(&dev->devres_head);
                        list_add_tail(&dev->devres_head,
                                      &early_platform_device_list);
index ba3487c9835b67fa64daad5f6e23016f23a7f569..c22b869245d9498e7052fdc42ceacf3aaa4e332e 100644 (file)
 static LIST_HEAD(gpd_list);
 static DEFINE_MUTEX(gpd_list_lock);
 
+static struct generic_pm_domain *pm_genpd_lookup_name(const char *domain_name)
+{
+       struct generic_pm_domain *genpd = NULL, *gpd;
+
+       if (IS_ERR_OR_NULL(domain_name))
+               return NULL;
+
+       mutex_lock(&gpd_list_lock);
+       list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
+               if (!strcmp(gpd->name, domain_name)) {
+                       genpd = gpd;
+                       break;
+               }
+       }
+       mutex_unlock(&gpd_list_lock);
+       return genpd;
+}
+
 #ifdef CONFIG_PM
 
 struct generic_pm_domain *dev_to_genpd(struct device *dev)
@@ -256,10 +274,28 @@ int pm_genpd_poweron(struct generic_pm_domain *genpd)
        return ret;
 }
 
+/**
+ * pm_genpd_name_poweron - Restore power to a given PM domain and its masters.
+ * @domain_name: Name of the PM domain to power up.
+ */
+int pm_genpd_name_poweron(const char *domain_name)
+{
+       struct generic_pm_domain *genpd;
+
+       genpd = pm_genpd_lookup_name(domain_name);
+       return genpd ? pm_genpd_poweron(genpd) : -EINVAL;
+}
+
 #endif /* CONFIG_PM */
 
 #ifdef CONFIG_PM_RUNTIME
 
+static int genpd_start_dev_no_timing(struct generic_pm_domain *genpd,
+                                    struct device *dev)
+{
+       return GENPD_DEV_CALLBACK(genpd, int, start, dev);
+}
+
 static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
 {
        return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
@@ -436,7 +472,7 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
        not_suspended = 0;
        list_for_each_entry(pdd, &genpd->dev_list, list_node)
                if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
-                   || pdd->dev->power.irq_safe || to_gpd_data(pdd)->always_on))
+                   || pdd->dev->power.irq_safe))
                        not_suspended++;
 
        if (not_suspended > genpd->in_progress)
@@ -578,9 +614,6 @@ static int pm_genpd_runtime_suspend(struct device *dev)
 
        might_sleep_if(!genpd->dev_irq_safe);
 
-       if (dev_gpd_data(dev)->always_on)
-               return -EBUSY;
-
        stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
        if (stop_ok && !stop_ok(dev))
                return -EBUSY;
@@ -629,7 +662,7 @@ static int pm_genpd_runtime_resume(struct device *dev)
 
        /* If power.irq_safe, the PM domain is never powered off. */
        if (dev->power.irq_safe)
-               return genpd_start_dev(genpd, dev);
+               return genpd_start_dev_no_timing(genpd, dev);
 
        mutex_lock(&genpd->lock);
        ret = __pm_genpd_poweron(genpd);
@@ -697,6 +730,24 @@ static inline void genpd_power_off_work_fn(struct work_struct *work) {}
 
 #ifdef CONFIG_PM_SLEEP
 
+/**
+ * pm_genpd_present - Check if the given PM domain has been initialized.
+ * @genpd: PM domain to check.
+ */
+static bool pm_genpd_present(struct generic_pm_domain *genpd)
+{
+       struct generic_pm_domain *gpd;
+
+       if (IS_ERR_OR_NULL(genpd))
+               return false;
+
+       list_for_each_entry(gpd, &gpd_list, gpd_list_node)
+               if (gpd == genpd)
+                       return true;
+
+       return false;
+}
+
 static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
                                    struct device *dev)
 {
@@ -750,9 +801,10 @@ static int genpd_thaw_dev(struct generic_pm_domain *genpd, struct device *dev)
  * Check if the given PM domain can be powered off (during system suspend or
  * hibernation) and do that if so.  Also, in that case propagate to its masters.
  *
- * This function is only called in "noirq" stages of system power transitions,
- * so it need not acquire locks (all of the "noirq" callbacks are executed
- * sequentially, so it is guaranteed that it will never run twice in parallel).
+ * This function is only called in "noirq" and "syscore" stages of system power
+ * transitions, so it need not acquire locks (all of the "noirq" callbacks are
+ * executed sequentially, so it is guaranteed that it will never run twice in
+ * parallel).
  */
 static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
 {
@@ -776,6 +828,33 @@ static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
        }
 }
 
+/**
+ * pm_genpd_sync_poweron - Synchronously power on a PM domain and its masters.
+ * @genpd: PM domain to power on.
+ *
+ * This function is only called in "noirq" and "syscore" stages of system power
+ * transitions, so it need not acquire locks (all of the "noirq" callbacks are
+ * executed sequentially, so it is guaranteed that it will never run twice in
+ * parallel).
+ */
+static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd)
+{
+       struct gpd_link *link;
+
+       if (genpd->status != GPD_STATE_POWER_OFF)
+               return;
+
+       list_for_each_entry(link, &genpd->slave_links, slave_node) {
+               pm_genpd_sync_poweron(link->master);
+               genpd_sd_counter_inc(link->master);
+       }
+
+       if (genpd->power_on)
+               genpd->power_on(genpd);
+
+       genpd->status = GPD_STATE_ACTIVE;
+}
+
 /**
  * resume_needed - Check whether to resume a device before system suspend.
  * @dev: Device to check.
@@ -937,7 +1016,7 @@ static int pm_genpd_suspend_noirq(struct device *dev)
        if (IS_ERR(genpd))
                return -EINVAL;
 
-       if (genpd->suspend_power_off || dev_gpd_data(dev)->always_on
+       if (genpd->suspend_power_off
            || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
                return 0;
 
@@ -970,7 +1049,7 @@ static int pm_genpd_resume_noirq(struct device *dev)
        if (IS_ERR(genpd))
                return -EINVAL;
 
-       if (genpd->suspend_power_off || dev_gpd_data(dev)->always_on
+       if (genpd->suspend_power_off
            || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
                return 0;
 
@@ -979,7 +1058,7 @@ static int pm_genpd_resume_noirq(struct device *dev)
         * guaranteed that this function will never run twice in parallel for
         * the same PM domain, so it is not necessary to use locking here.
         */
-       pm_genpd_poweron(genpd);
+       pm_genpd_sync_poweron(genpd);
        genpd->suspended_count--;
 
        return genpd_start_dev(genpd, dev);
@@ -1090,8 +1169,7 @@ static int pm_genpd_freeze_noirq(struct device *dev)
        if (IS_ERR(genpd))
                return -EINVAL;
 
-       return genpd->suspend_power_off || dev_gpd_data(dev)->always_on ?
-               0 : genpd_stop_dev(genpd, dev);
+       return genpd->suspend_power_off ? 0 : genpd_stop_dev(genpd, dev);
 }
 
 /**
@@ -1111,8 +1189,7 @@ static int pm_genpd_thaw_noirq(struct device *dev)
        if (IS_ERR(genpd))
                return -EINVAL;
 
-       return genpd->suspend_power_off || dev_gpd_data(dev)->always_on ?
-               0 : genpd_start_dev(genpd, dev);
+       return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev);
 }
 
 /**
@@ -1186,8 +1263,8 @@ static int pm_genpd_restore_noirq(struct device *dev)
        if (genpd->suspended_count++ == 0) {
                /*
                 * The boot kernel might put the domain into arbitrary state,
-                * so make it appear as powered off to pm_genpd_poweron(), so
-                * that it tries to power it on in case it was really off.
+                * so make it appear as powered off to pm_genpd_sync_poweron(),
+                * so that it tries to power it on in case it was really off.
                 */
                genpd->status = GPD_STATE_POWER_OFF;
                if (genpd->suspend_power_off) {
@@ -1205,9 +1282,9 @@ static int pm_genpd_restore_noirq(struct device *dev)
        if (genpd->suspend_power_off)
                return 0;
 
-       pm_genpd_poweron(genpd);
+       pm_genpd_sync_poweron(genpd);
 
-       return dev_gpd_data(dev)->always_on ? 0 : genpd_start_dev(genpd, dev);
+       return genpd_start_dev(genpd, dev);
 }
 
 /**
@@ -1246,6 +1323,31 @@ static void pm_genpd_complete(struct device *dev)
        }
 }
 
+/**
+ * pm_genpd_syscore_switch - Switch power during system core suspend or resume.
+ * @dev: Device that normally is marked as "always on" to switch power for.
+ *
+ * This routine may only be called during the system core (syscore) suspend or
+ * resume phase for devices whose "always on" flags are set.
+ */
+void pm_genpd_syscore_switch(struct device *dev, bool suspend)
+{
+       struct generic_pm_domain *genpd;
+
+       genpd = dev_to_genpd(dev);
+       if (!pm_genpd_present(genpd))
+               return;
+
+       if (suspend) {
+               genpd->suspended_count++;
+               pm_genpd_sync_poweroff(genpd);
+       } else {
+               pm_genpd_sync_poweron(genpd);
+               genpd->suspended_count--;
+       }
+}
+EXPORT_SYMBOL_GPL(pm_genpd_syscore_switch);
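The sh_* timer drivers later in this commit do not call pm_genpd_syscore_switch() directly; they use pm_genpd_syscore_poweroff()/pm_genpd_syscore_poweron(), which in the pm_domain.h part of this series (not shown in these hunks) are assumed to be thin wrappers of roughly this shape:

	/* Assumed wrapper shape, for orientation only. */
	static inline void pm_genpd_syscore_poweroff(struct device *dev)
	{
		pm_genpd_syscore_switch(dev, true);
	}

	static inline void pm_genpd_syscore_poweron(struct device *dev)
	{
		pm_genpd_syscore_switch(dev, false);
	}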
+
 #else
 
 #define pm_genpd_prepare               NULL
@@ -1393,6 +1495,19 @@ int __pm_genpd_of_add_device(struct device_node *genpd_node, struct device *dev,
        return __pm_genpd_add_device(genpd, dev, td);
 }
 
+
+/**
+ * __pm_genpd_name_add_device - Find I/O PM domain and add a device to it.
+ * @domain_name: Name of the PM domain to add the device to.
+ * @dev: Device to be added.
+ * @td: Set of PM QoS timing parameters to attach to the device.
+ */
+int __pm_genpd_name_add_device(const char *domain_name, struct device *dev,
+                              struct gpd_timing_data *td)
+{
+       return __pm_genpd_add_device(pm_genpd_lookup_name(domain_name), dev, td);
+}
+
 /**
  * pm_genpd_remove_device - Remove a device from an I/O PM domain.
  * @genpd: PM domain to remove the device from.
@@ -1454,26 +1569,6 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
        return ret;
 }
 
-/**
- * pm_genpd_dev_always_on - Set/unset the "always on" flag for a given device.
- * @dev: Device to set/unset the flag for.
- * @val: The new value of the device's "always on" flag.
- */
-void pm_genpd_dev_always_on(struct device *dev, bool val)
-{
-       struct pm_subsys_data *psd;
-       unsigned long flags;
-
-       spin_lock_irqsave(&dev->power.lock, flags);
-
-       psd = dev_to_psd(dev);
-       if (psd && psd->domain_data)
-               to_gpd_data(psd->domain_data)->always_on = val;
-
-       spin_unlock_irqrestore(&dev->power.lock, flags);
-}
-EXPORT_SYMBOL_GPL(pm_genpd_dev_always_on);
-
 /**
  * pm_genpd_dev_need_restore - Set/unset the device's "need restore" flag.
  * @dev: Device to set/unset the flag for.
@@ -1505,7 +1600,8 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
        struct gpd_link *link;
        int ret = 0;
 
-       if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
+       if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
+           || genpd == subdomain)
                return -EINVAL;
 
  start:
@@ -1551,6 +1647,35 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
        return ret;
 }
 
+/**
+ * pm_genpd_add_subdomain_names - Add a subdomain to an I/O PM domain.
+ * @master_name: Name of the master PM domain to add the subdomain to.
+ * @subdomain_name: Name of the subdomain to be added.
+ */
+int pm_genpd_add_subdomain_names(const char *master_name,
+                                const char *subdomain_name)
+{
+       struct generic_pm_domain *master = NULL, *subdomain = NULL, *gpd;
+
+       if (IS_ERR_OR_NULL(master_name) || IS_ERR_OR_NULL(subdomain_name))
+               return -EINVAL;
+
+       mutex_lock(&gpd_list_lock);
+       list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
+               if (!master && !strcmp(gpd->name, master_name))
+                       master = gpd;
+
+               if (!subdomain && !strcmp(gpd->name, subdomain_name))
+                       subdomain = gpd;
+
+               if (master && subdomain)
+                       break;
+       }
+       mutex_unlock(&gpd_list_lock);
+
+       return pm_genpd_add_subdomain(master, subdomain);
+}
+
 /**
  * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
  * @genpd: Master PM domain to remove the subdomain from.
@@ -1704,7 +1829,16 @@ int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
 }
 EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);
 
-int genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
+/**
+ * pm_genpd_attach_cpuidle - Connect the given PM domain with cpuidle.
+ * @genpd: PM domain to be connected with cpuidle.
+ * @state: cpuidle state this domain can disable/enable.
+ *
+ * Make a PM domain behave as though it contained a CPU core, that is, instead
+ * of calling its power down routine it will enable the given cpuidle state so
+ * that the cpuidle subsystem can power it down (if possible and desirable).
+ */
+int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
 {
        struct cpuidle_driver *cpuidle_drv;
        struct gpd_cpu_data *cpu_data;
@@ -1753,7 +1887,24 @@ int genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
        goto out;
 }
 
-int genpd_detach_cpuidle(struct generic_pm_domain *genpd)
+/**
+ * pm_genpd_name_attach_cpuidle - Find PM domain and connect cpuidle to it.
+ * @name: Name of the domain to connect to cpuidle.
+ * @state: cpuidle state this domain can manipulate.
+ */
+int pm_genpd_name_attach_cpuidle(const char *name, int state)
+{
+       return pm_genpd_attach_cpuidle(pm_genpd_lookup_name(name), state);
+}
+
+/**
+ * pm_genpd_detach_cpuidle - Remove the cpuidle connection from a PM domain.
+ * @genpd: PM domain to remove the cpuidle connection from.
+ *
+ * Remove the cpuidle connection set up by pm_genpd_attach_cpuidle() from the
+ * given PM domain.
+ */
+int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
 {
        struct gpd_cpu_data *cpu_data;
        struct cpuidle_state *idle_state;
@@ -1784,6 +1935,15 @@ int genpd_detach_cpuidle(struct generic_pm_domain *genpd)
        return ret;
 }
 
+/**
+ * pm_genpd_name_detach_cpuidle - Find PM domain and disconnect cpuidle from it.
+ * @name: Name of the domain to disconnect cpuidle from.
+ */
+int pm_genpd_name_detach_cpuidle(const char *name)
+{
+       return pm_genpd_detach_cpuidle(pm_genpd_lookup_name(name));
+}
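Taken together, the new name-based helpers let platform code wire up PM domains by string instead of passing struct generic_pm_domain pointers around. A hypothetical board-file fragment (domain names and the cpuidle state index are invented for illustration):

	static int __init example_pd_setup(struct device *dev)
	{
		int ret;

		ret = pm_genpd_name_poweron("A4R");	/* power the domain up */
		if (ret)
			return ret;

		ret = __pm_genpd_name_add_device("A4R", dev, NULL);
		if (ret)
			return ret;

		/* make "A4R" a subdomain of "A4S" */
		ret = pm_genpd_add_subdomain_names("A4S", "A4R");
		if (ret)
			return ret;

		/* let cpuidle state 1 power "A4S" down instead of the genpd core */
		return pm_genpd_name_attach_cpuidle("A4S", 1);
	}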
+
 /* Default device callbacks for generic PM domains. */
 
 /**
index 0113adc310dccc4a62a325cccde4221f34c5ce7e..008e6786ae7919e3e4165e6b605cb6a7836ad4c6 100644 (file)
@@ -57,20 +57,17 @@ static pm_message_t pm_transition;
 static int async_error;
 
 /**
- * device_pm_init - Initialize the PM-related part of a device object.
+ * device_pm_sleep_init - Initialize system suspend-related device fields.
  * @dev: Device object being initialized.
  */
-void device_pm_init(struct device *dev)
+void device_pm_sleep_init(struct device *dev)
 {
        dev->power.is_prepared = false;
        dev->power.is_suspended = false;
        init_completion(&dev->power.completion);
        complete_all(&dev->power.completion);
        dev->power.wakeup = NULL;
-       spin_lock_init(&dev->power.lock);
-       pm_runtime_init(dev);
        INIT_LIST_HEAD(&dev->power.entry);
-       dev->power.power_state = PMSG_INVALID;
 }
 
 /**
@@ -408,6 +405,9 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
        TRACE_DEVICE(dev);
        TRACE_RESUME(0);
 
+       if (dev->power.syscore)
+               goto Out;
+
        if (dev->pm_domain) {
                info = "noirq power domain ";
                callback = pm_noirq_op(&dev->pm_domain->ops, state);
@@ -429,6 +429,7 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
 
        error = dpm_run_callback(callback, dev, state, info);
 
+ Out:
        TRACE_RESUME(error);
        return error;
 }
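The new dev->power.syscore checks in this file make the PM core skip such devices in every suspend/resume phase; a driver opts a device in with dev_pm_syscore_device(), introduced elsewhere in this series and used by the sh_* timer drivers below. A minimal sketch:

	/* Sketch: mark a timekeeping device as "syscore" while it is in use,
	 * so the dpm_suspend/dpm_resume paths leave it alone and only the
	 * clockevents/clocksource suspend hooks touch it. */
	static int example_timer_start(struct platform_device *pdev)
	{
		pm_runtime_get_sync(&pdev->dev);
		dev_pm_syscore_device(&pdev->dev, true);
		/* ... program the hardware ... */
		return 0;
	}

	static void example_timer_stop(struct platform_device *pdev)
	{
		/* ... stop the hardware ... */
		dev_pm_syscore_device(&pdev->dev, false);
		pm_runtime_put(&pdev->dev);
	}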
@@ -486,6 +487,9 @@ static int device_resume_early(struct device *dev, pm_message_t state)
        TRACE_DEVICE(dev);
        TRACE_RESUME(0);
 
+       if (dev->power.syscore)
+               goto Out;
+
        if (dev->pm_domain) {
                info = "early power domain ";
                callback = pm_late_early_op(&dev->pm_domain->ops, state);
@@ -507,6 +511,7 @@ static int device_resume_early(struct device *dev, pm_message_t state)
 
        error = dpm_run_callback(callback, dev, state, info);
 
+ Out:
        TRACE_RESUME(error);
        return error;
 }
@@ -565,11 +570,13 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;
-       bool put = false;
 
        TRACE_DEVICE(dev);
        TRACE_RESUME(0);
 
+       if (dev->power.syscore)
+               goto Complete;
+
        dpm_wait(dev->parent, async);
        device_lock(dev);
 
@@ -583,7 +590,6 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
                goto Unlock;
 
        pm_runtime_enable(dev);
-       put = true;
 
        if (dev->pm_domain) {
                info = "power domain ";
@@ -632,13 +638,12 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
 
  Unlock:
        device_unlock(dev);
+
+ Complete:
        complete_all(&dev->power.completion);
 
        TRACE_RESUME(error);
 
-       if (put)
-               pm_runtime_put_sync(dev);
-
        return error;
 }
 
@@ -722,6 +727,9 @@ static void device_complete(struct device *dev, pm_message_t state)
        void (*callback)(struct device *) = NULL;
        char *info = NULL;
 
+       if (dev->power.syscore)
+               return;
+
        device_lock(dev);
 
        if (dev->pm_domain) {
@@ -749,6 +757,8 @@ static void device_complete(struct device *dev, pm_message_t state)
        }
 
        device_unlock(dev);
+
+       pm_runtime_put_sync(dev);
 }
 
 /**
@@ -834,6 +844,9 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
        pm_callback_t callback = NULL;
        char *info = NULL;
 
+       if (dev->power.syscore)
+               return 0;
+
        if (dev->pm_domain) {
                info = "noirq power domain ";
                callback = pm_noirq_op(&dev->pm_domain->ops, state);
@@ -917,6 +930,9 @@ static int device_suspend_late(struct device *dev, pm_message_t state)
        pm_callback_t callback = NULL;
        char *info = NULL;
 
+       if (dev->power.syscore)
+               return 0;
+
        if (dev->pm_domain) {
                info = "late power domain ";
                callback = pm_late_early_op(&dev->pm_domain->ops, state);
@@ -996,7 +1012,7 @@ int dpm_suspend_end(pm_message_t state)
 
        error = dpm_suspend_noirq(state);
        if (error) {
-               dpm_resume_early(state);
+               dpm_resume_early(resume_event(state));
                return error;
        }
 
@@ -1043,16 +1059,23 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
        if (async_error)
                goto Complete;
 
-       pm_runtime_get_noresume(dev);
+       /*
+        * If a device configured to wake up the system from sleep states
+        * has been suspended at run time and there's a resume request pending
+        * for it, this is equivalent to the device signaling wakeup, so the
+        * system suspend operation should be aborted.
+        */
        if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
                pm_wakeup_event(dev, 0);
 
        if (pm_wakeup_pending()) {
-               pm_runtime_put_sync(dev);
                async_error = -EBUSY;
                goto Complete;
        }
 
+       if (dev->power.syscore)
+               goto Complete;
+
        device_lock(dev);
 
        if (dev->pm_domain) {
@@ -1111,12 +1134,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
  Complete:
        complete_all(&dev->power.completion);
 
-       if (error) {
-               pm_runtime_put_sync(dev);
+       if (error)
                async_error = error;
-       } else if (dev->power.is_suspended) {
+       else if (dev->power.is_suspended)
                __pm_runtime_disable(dev, false);
-       }
 
        return error;
 }
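The net effect of the runtime PM changes in this file is that the reference now brackets the whole sleep transition instead of just the suspend path; a condensed view of the flow above (a summary, not new API):

	/*
	 * device_prepare():   pm_runtime_get_noresume(dev);  - block runtime suspend
	 * __device_suspend(): pm_runtime_barrier(dev); ...    - no get/put here any more
	 * device_resume():    pm_runtime_enable(dev);
	 * device_complete():  pm_runtime_put_sync(dev);       - allow runtime suspend again
	 */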
@@ -1209,6 +1230,17 @@ static int device_prepare(struct device *dev, pm_message_t state)
        char *info = NULL;
        int error = 0;
 
+       if (dev->power.syscore)
+               return 0;
+
+       /*
+        * If a device's parent goes into runtime suspend at the wrong time,
+        * it won't be possible to resume the device.  To prevent this we
+        * block runtime suspend here, during the prepare phase, and allow
+        * it again during the complete phase.
+        */
+       pm_runtime_get_noresume(dev);
+
        device_lock(dev);
 
        dev->power.wakeup_path = device_may_wakeup(dev);
index ac993eafec82ecd707cfc6e2654b5a8f30703fc6..d9468642fc414c22f8578c82e57e2cac9ba224e5 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/rculist.h>
 #include <linux/rcupdate.h>
 #include <linux/opp.h>
+#include <linux/of.h>
 
 /*
  * Internal data structure organization with the OPP layer library is as
@@ -674,3 +675,49 @@ struct srcu_notifier_head *opp_get_notifier(struct device *dev)
 
        return &dev_opp->head;
 }
+
+#ifdef CONFIG_OF
+/**
+ * of_init_opp_table() - Initialize opp table from device tree
+ * @dev:       device pointer used to lookup device OPPs.
+ *
+ * Register the initial OPP table with the OPP library for given device.
+ */
+int of_init_opp_table(struct device *dev)
+{
+       const struct property *prop;
+       const __be32 *val;
+       int nr;
+
+       prop = of_find_property(dev->of_node, "operating-points", NULL);
+       if (!prop)
+               return -ENODEV;
+       if (!prop->value)
+               return -ENODATA;
+
+       /*
+        * Each OPP is a set of tuples consisting of frequency and
+        * voltage like <freq-kHz vol-uV>.
+        */
+       nr = prop->length / sizeof(u32);
+       if (nr % 2) {
+               dev_err(dev, "%s: Invalid OPP list\n", __func__);
+               return -EINVAL;
+       }
+
+       val = prop->value;
+       while (nr) {
+               unsigned long freq = be32_to_cpup(val++) * 1000;
+               unsigned long volt = be32_to_cpup(val++);
+
+               if (opp_add(dev, freq, volt)) {
+                       dev_warn(dev, "%s: Failed to add OPP %ld\n",
+                                __func__, freq);
+                       /* move on to the next entry instead of retrying it forever */
+               }
+               nr -= 2;
+       }
+
+       return 0;
+}
+#endif
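A hedged sketch of a consumer of of_init_opp_table(): the device node carries an operating-points list of <kHz uV> pairs, the helper registers them with the OPP library, and the driver then queries them under RCU. Node contents and names below are illustrative only:

	/*
	 * Example DT fragment (values invented), kHz / uV pairs:
	 *
	 *	operating-points = <792000 1100000
	 *			    396000  950000>;
	 */
	static int example_init_opps(struct device *cpu_dev)
	{
		unsigned long freq = ULONG_MAX;	/* ask for the highest OPP */
		struct opp *opp;
		int ret;

		ret = of_init_opp_table(cpu_dev);
		if (ret)
			return ret;

		rcu_read_lock();
		opp = opp_find_freq_floor(cpu_dev, &freq);
		if (!IS_ERR(opp))
			dev_info(cpu_dev, "top OPP: %lu Hz at %lu uV\n",
				 freq, opp_get_voltage(opp));
		rcu_read_unlock();

		return 0;
	}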
index eeb4bff9505cd6dccd5d2cb30671fd69105966ef..0dbfdf4419af8914136060f0b9c88838e4f2db4a 100644 (file)
@@ -1,12 +1,32 @@
 #include <linux/pm_qos.h>
 
+static inline void device_pm_init_common(struct device *dev)
+{
+       if (!dev->power.early_init) {
+               spin_lock_init(&dev->power.lock);
+               dev->power.power_state = PMSG_INVALID;
+               dev->power.early_init = true;
+       }
+}
+
 #ifdef CONFIG_PM_RUNTIME
 
+static inline void pm_runtime_early_init(struct device *dev)
+{
+       dev->power.disable_depth = 1;
+       device_pm_init_common(dev);
+}
+
 extern void pm_runtime_init(struct device *dev);
 extern void pm_runtime_remove(struct device *dev);
 
 #else /* !CONFIG_PM_RUNTIME */
 
+static inline void pm_runtime_early_init(struct device *dev)
+{
+       device_pm_init_common(dev);
+}
+
 static inline void pm_runtime_init(struct device *dev) {}
 static inline void pm_runtime_remove(struct device *dev) {}
 
@@ -25,7 +45,7 @@ static inline struct device *to_device(struct list_head *entry)
        return container_of(entry, struct device, power.entry);
 }
 
-extern void device_pm_init(struct device *dev);
+extern void device_pm_sleep_init(struct device *dev);
 extern void device_pm_add(struct device *);
 extern void device_pm_remove(struct device *);
 extern void device_pm_move_before(struct device *, struct device *);
@@ -34,12 +54,7 @@ extern void device_pm_move_last(struct device *);
 
 #else /* !CONFIG_PM_SLEEP */
 
-static inline void device_pm_init(struct device *dev)
-{
-       spin_lock_init(&dev->power.lock);
-       dev->power.power_state = PMSG_INVALID;
-       pm_runtime_init(dev);
-}
+static inline void device_pm_sleep_init(struct device *dev) {}
 
 static inline void device_pm_add(struct device *dev)
 {
@@ -60,6 +75,13 @@ static inline void device_pm_move_last(struct device *dev) {}
 
 #endif /* !CONFIG_PM_SLEEP */
 
+static inline void device_pm_init(struct device *dev)
+{
+       device_pm_init_common(dev);
+       device_pm_sleep_init(dev);
+       pm_runtime_init(dev);
+}
+
 #ifdef CONFIG_PM
 
 /*
index 7d9c1cb1c39a7760081bae4d518efd8acf835902..3148b10dc2e59b92dd5913e2b34c446d54ac69b7 100644 (file)
@@ -509,6 +509,9 @@ static int rpm_resume(struct device *dev, int rpmflags)
  repeat:
        if (dev->power.runtime_error)
                retval = -EINVAL;
+       else if (dev->power.disable_depth == 1 && dev->power.is_suspended
+           && dev->power.runtime_status == RPM_ACTIVE)
+               retval = 1;
        else if (dev->power.disable_depth > 0)
                retval = -EACCES;
        if (retval)
index cbb463b3a750e30bc70d7d8de40ff7555e7670ee..e6ee5e80e546a1c7895194e529e2e70f5e53ce7f 100644 (file)
@@ -127,6 +127,8 @@ EXPORT_SYMBOL_GPL(wakeup_source_destroy);
  */
 void wakeup_source_add(struct wakeup_source *ws)
 {
+       unsigned long flags;
+
        if (WARN_ON(!ws))
                return;
 
@@ -135,9 +137,9 @@ void wakeup_source_add(struct wakeup_source *ws)
        ws->active = false;
        ws->last_time = ktime_get();
 
-       spin_lock_irq(&events_lock);
+       spin_lock_irqsave(&events_lock, flags);
        list_add_rcu(&ws->entry, &wakeup_sources);
-       spin_unlock_irq(&events_lock);
+       spin_unlock_irqrestore(&events_lock, flags);
 }
 EXPORT_SYMBOL_GPL(wakeup_source_add);
 
@@ -147,12 +149,14 @@ EXPORT_SYMBOL_GPL(wakeup_source_add);
  */
 void wakeup_source_remove(struct wakeup_source *ws)
 {
+       unsigned long flags;
+
        if (WARN_ON(!ws))
                return;
 
-       spin_lock_irq(&events_lock);
+       spin_lock_irqsave(&events_lock, flags);
        list_del_rcu(&ws->entry);
-       spin_unlock_irq(&events_lock);
+       spin_unlock_irqrestore(&events_lock, flags);
        synchronize_rcu();
 }
 EXPORT_SYMBOL_GPL(wakeup_source_remove);
@@ -649,6 +653,31 @@ void pm_wakeup_event(struct device *dev, unsigned int msec)
 }
 EXPORT_SYMBOL_GPL(pm_wakeup_event);
 
+static void print_active_wakeup_sources(void)
+{
+       struct wakeup_source *ws;
+       int active = 0;
+       struct wakeup_source *last_activity_ws = NULL;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
+               if (ws->active) {
+                       pr_info("active wakeup source: %s\n", ws->name);
+                       active = 1;
+               } else if (!active &&
+                          (!last_activity_ws ||
+                           ktime_to_ns(ws->last_time) >
+                           ktime_to_ns(last_activity_ws->last_time))) {
+                       last_activity_ws = ws;
+               }
+       }
+
+       if (!active && last_activity_ws)
+               pr_info("last active wakeup source: %s\n",
+                       last_activity_ws->name);
+       rcu_read_unlock();
+}
+
 /**
  * pm_wakeup_pending - Check if power transition in progress should be aborted.
  *
@@ -671,6 +700,10 @@ bool pm_wakeup_pending(void)
                events_check_enabled = !ret;
        }
        spin_unlock_irqrestore(&events_lock, flags);
+
+       if (ret)
+               print_active_wakeup_sources();
+
        return ret;
 }
 
@@ -723,15 +756,16 @@ bool pm_get_wakeup_count(unsigned int *count, bool block)
 bool pm_save_wakeup_count(unsigned int count)
 {
        unsigned int cnt, inpr;
+       unsigned long flags;
 
        events_check_enabled = false;
-       spin_lock_irq(&events_lock);
+       spin_lock_irqsave(&events_lock, flags);
        split_counters(&cnt, &inpr);
        if (cnt == count && inpr == 0) {
                saved_count = count;
                events_check_enabled = true;
        }
-       spin_unlock_irq(&events_lock);
+       spin_unlock_irqrestore(&events_lock, flags);
        return events_check_enabled;
 }
 
index 11f36e5021367d7dd7c2b0794241154a0544ad0a..fc2de5528dcc94eb65537baa17b78048a5002827 100644 (file)
@@ -86,6 +86,7 @@ static struct usb_device_id ath3k_table[] = {
 
        /* Atheros AR5BBU22 with sflash firmware */
        { USB_DEVICE(0x0489, 0xE03C) },
+       { USB_DEVICE(0x0489, 0xE036) },
 
        { }     /* Terminating entry */
 };
@@ -109,6 +110,7 @@ static struct usb_device_id ath3k_blist_tbl[] = {
 
        /* Atheros AR5BBU22 with sflash firmware */
        { USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0489, 0xE036), .driver_info = BTUSB_ATH3012 },
 
        { }     /* Terminating entry */
 };
index cef3bac1a543d83113b54939585f807748accaf7..654e248763efb98024bd81b57118cd2ad1d77cdf 100644 (file)
@@ -52,6 +52,9 @@ static struct usb_device_id btusb_table[] = {
        /* Generic Bluetooth USB device */
        { USB_DEVICE_INFO(0xe0, 0x01, 0x01) },
 
+       /* Apple-specific (Broadcom) devices */
+       { USB_VENDOR_AND_INTERFACE_INFO(0x05ac, 0xff, 0x01, 0x01) },
+
        /* Broadcom SoftSailing reporting vendor specific */
        { USB_DEVICE(0x0a5c, 0x21e1) },
 
@@ -94,16 +97,14 @@ static struct usb_device_id btusb_table[] = {
 
        /* Broadcom BCM20702A0 */
        { USB_DEVICE(0x0489, 0xe042) },
-       { USB_DEVICE(0x0a5c, 0x21e3) },
-       { USB_DEVICE(0x0a5c, 0x21e6) },
-       { USB_DEVICE(0x0a5c, 0x21e8) },
-       { USB_DEVICE(0x0a5c, 0x21f3) },
-       { USB_DEVICE(0x0a5c, 0x21f4) },
        { USB_DEVICE(0x413c, 0x8197) },
 
        /* Foxconn - Hon Hai */
        { USB_DEVICE(0x0489, 0xe033) },
 
+       /* Broadcom devices with vendor specific id */
+       { USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01) },
+
        { }     /* Terminating entry */
 };
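The two USB_VENDOR_AND_INTERFACE_INFO() entries replace a list of individual BCM20702A0 product IDs with a catch-all: any Apple (0x05ac) or Broadcom (0x0a5c) device that exposes a vendor-specific (0xff/0x01/0x01) Bluetooth interface now matches. The macro is roughly equivalent to spelling the match out by hand:

	/* Approximate expansion of USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01). */
	static const struct usb_device_id example_id = {
		.match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_INFO,
		.idVendor = 0x0a5c,
		.bInterfaceClass = 0xff,	/* vendor specific */
		.bInterfaceSubClass = 0x01,
		.bInterfaceProtocol = 0x01,
	};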
 
@@ -141,6 +142,7 @@ static struct usb_device_id blacklist_table[] = {
 
        /* Atheros AR5BBU12 with sflash firmware */
        { USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0489, 0xe036), .driver_info = BTUSB_ATH3012 },
 
        /* Broadcom BCM2035 */
        { USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU },
index 98b06baafcc64dd95c2a16965ea261210080c324..a5f7829f27993b8fefea105357229413b78e5309 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
 
 struct sh_cmt_priv {
        void __iomem *mapbase;
@@ -52,6 +53,7 @@ struct sh_cmt_priv {
        struct clock_event_device ced;
        struct clocksource cs;
        unsigned long total_cycles;
+       bool cs_enabled;
 };
 
 static DEFINE_RAW_SPINLOCK(sh_cmt_lock);
@@ -155,6 +157,9 @@ static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
 {
        int k, ret;
 
+       pm_runtime_get_sync(&p->pdev->dev);
+       dev_pm_syscore_device(&p->pdev->dev, true);
+
        /* enable clock */
        ret = clk_enable(p->clk);
        if (ret) {
@@ -221,6 +226,9 @@ static void sh_cmt_disable(struct sh_cmt_priv *p)
 
        /* stop clock */
        clk_disable(p->clk);
+
+       dev_pm_syscore_device(&p->pdev->dev, false);
+       pm_runtime_put(&p->pdev->dev);
 }
 
 /* private flags */
@@ -451,22 +459,42 @@ static int sh_cmt_clocksource_enable(struct clocksource *cs)
        int ret;
        struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
 
+       WARN_ON(p->cs_enabled);
+
        p->total_cycles = 0;
 
        ret = sh_cmt_start(p, FLAG_CLOCKSOURCE);
-       if (!ret)
+       if (!ret) {
                __clocksource_updatefreq_hz(cs, p->rate);
+               p->cs_enabled = true;
+       }
        return ret;
 }
 
 static void sh_cmt_clocksource_disable(struct clocksource *cs)
 {
-       sh_cmt_stop(cs_to_sh_cmt(cs), FLAG_CLOCKSOURCE);
+       struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
+
+       WARN_ON(!p->cs_enabled);
+
+       sh_cmt_stop(p, FLAG_CLOCKSOURCE);
+       p->cs_enabled = false;
+}
+
+static void sh_cmt_clocksource_suspend(struct clocksource *cs)
+{
+       struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
+
+       sh_cmt_stop(p, FLAG_CLOCKSOURCE);
+       pm_genpd_syscore_poweroff(&p->pdev->dev);
 }
 
 static void sh_cmt_clocksource_resume(struct clocksource *cs)
 {
-       sh_cmt_start(cs_to_sh_cmt(cs), FLAG_CLOCKSOURCE);
+       struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
+
+       pm_genpd_syscore_poweron(&p->pdev->dev);
+       sh_cmt_start(p, FLAG_CLOCKSOURCE);
 }
 
 static int sh_cmt_register_clocksource(struct sh_cmt_priv *p,
@@ -479,7 +507,7 @@ static int sh_cmt_register_clocksource(struct sh_cmt_priv *p,
        cs->read = sh_cmt_clocksource_read;
        cs->enable = sh_cmt_clocksource_enable;
        cs->disable = sh_cmt_clocksource_disable;
-       cs->suspend = sh_cmt_clocksource_disable;
+       cs->suspend = sh_cmt_clocksource_suspend;
        cs->resume = sh_cmt_clocksource_resume;
        cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8);
        cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
@@ -562,6 +590,16 @@ static int sh_cmt_clock_event_next(unsigned long delta,
        return 0;
 }
 
+static void sh_cmt_clock_event_suspend(struct clock_event_device *ced)
+{
+       pm_genpd_syscore_poweroff(&ced_to_sh_cmt(ced)->pdev->dev);
+}
+
+static void sh_cmt_clock_event_resume(struct clock_event_device *ced)
+{
+       pm_genpd_syscore_poweron(&ced_to_sh_cmt(ced)->pdev->dev);
+}
+
 static void sh_cmt_register_clockevent(struct sh_cmt_priv *p,
                                       char *name, unsigned long rating)
 {
@@ -576,6 +614,8 @@ static void sh_cmt_register_clockevent(struct sh_cmt_priv *p,
        ced->cpumask = cpumask_of(0);
        ced->set_next_event = sh_cmt_clock_event_next;
        ced->set_mode = sh_cmt_clock_event_mode;
+       ced->suspend = sh_cmt_clock_event_suspend;
+       ced->resume = sh_cmt_clock_event_resume;
 
        dev_info(&p->pdev->dev, "used for clock events\n");
        clockevents_register_device(ced);
@@ -670,6 +710,7 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
                dev_err(&p->pdev->dev, "registration failed\n");
                goto err1;
        }
+       p->cs_enabled = false;
 
        ret = setup_irq(irq, &p->irqaction);
        if (ret) {
@@ -688,14 +729,17 @@ err0:
 static int __devinit sh_cmt_probe(struct platform_device *pdev)
 {
        struct sh_cmt_priv *p = platform_get_drvdata(pdev);
+       struct sh_timer_config *cfg = pdev->dev.platform_data;
        int ret;
 
-       if (!is_early_platform_device(pdev))
-               pm_genpd_dev_always_on(&pdev->dev, true);
+       if (!is_early_platform_device(pdev)) {
+               pm_runtime_set_active(&pdev->dev);
+               pm_runtime_enable(&pdev->dev);
+       }
 
        if (p) {
                dev_info(&pdev->dev, "kept as earlytimer\n");
-               return 0;
+               goto out;
        }
 
        p = kmalloc(sizeof(*p), GFP_KERNEL);
@@ -708,8 +752,19 @@ static int __devinit sh_cmt_probe(struct platform_device *pdev)
        if (ret) {
                kfree(p);
                platform_set_drvdata(pdev, NULL);
+               pm_runtime_idle(&pdev->dev);
+               return ret;
        }
-       return ret;
+       if (is_early_platform_device(pdev))
+               return 0;
+
+ out:
+       if (cfg->clockevent_rating || cfg->clocksource_rating)
+               pm_runtime_irq_safe(&pdev->dev);
+       else
+               pm_runtime_idle(&pdev->dev);
+
+       return 0;
 }
 
 static int __devexit sh_cmt_remove(struct platform_device *pdev)
index d9b76ca64a611327c0ea79d979482f638e3c14d3..c5eea858054aa4e67195edf69c1b1951fa8cf11c 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
 
 struct sh_mtu2_priv {
        void __iomem *mapbase;
@@ -123,6 +124,9 @@ static int sh_mtu2_enable(struct sh_mtu2_priv *p)
 {
        int ret;
 
+       pm_runtime_get_sync(&p->pdev->dev);
+       dev_pm_syscore_device(&p->pdev->dev, true);
+
        /* enable clock */
        ret = clk_enable(p->clk);
        if (ret) {
@@ -157,6 +161,9 @@ static void sh_mtu2_disable(struct sh_mtu2_priv *p)
 
        /* stop clock */
        clk_disable(p->clk);
+
+       dev_pm_syscore_device(&p->pdev->dev, false);
+       pm_runtime_put(&p->pdev->dev);
 }
 
 static irqreturn_t sh_mtu2_interrupt(int irq, void *dev_id)
@@ -208,6 +215,16 @@ static void sh_mtu2_clock_event_mode(enum clock_event_mode mode,
        }
 }
 
+static void sh_mtu2_clock_event_suspend(struct clock_event_device *ced)
+{
+       pm_genpd_syscore_poweroff(&ced_to_sh_mtu2(ced)->pdev->dev);
+}
+
+static void sh_mtu2_clock_event_resume(struct clock_event_device *ced)
+{
+       pm_genpd_syscore_poweron(&ced_to_sh_mtu2(ced)->pdev->dev);
+}
+
 static void sh_mtu2_register_clockevent(struct sh_mtu2_priv *p,
                                       char *name, unsigned long rating)
 {
@@ -221,6 +238,8 @@ static void sh_mtu2_register_clockevent(struct sh_mtu2_priv *p,
        ced->rating = rating;
        ced->cpumask = cpumask_of(0);
        ced->set_mode = sh_mtu2_clock_event_mode;
+       ced->suspend = sh_mtu2_clock_event_suspend;
+       ced->resume = sh_mtu2_clock_event_resume;
 
        dev_info(&p->pdev->dev, "used for clock events\n");
        clockevents_register_device(ced);
@@ -305,14 +324,17 @@ static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev)
 static int __devinit sh_mtu2_probe(struct platform_device *pdev)
 {
        struct sh_mtu2_priv *p = platform_get_drvdata(pdev);
+       struct sh_timer_config *cfg = pdev->dev.platform_data;
        int ret;
 
-       if (!is_early_platform_device(pdev))
-               pm_genpd_dev_always_on(&pdev->dev, true);
+       if (!is_early_platform_device(pdev)) {
+               pm_runtime_set_active(&pdev->dev);
+               pm_runtime_enable(&pdev->dev);
+       }
 
        if (p) {
                dev_info(&pdev->dev, "kept as earlytimer\n");
-               return 0;
+               goto out;
        }
 
        p = kmalloc(sizeof(*p), GFP_KERNEL);
@@ -325,8 +347,19 @@ static int __devinit sh_mtu2_probe(struct platform_device *pdev)
        if (ret) {
                kfree(p);
                platform_set_drvdata(pdev, NULL);
+               pm_runtime_idle(&pdev->dev);
+               return ret;
        }
-       return ret;
+       if (is_early_platform_device(pdev))
+               return 0;
+
+ out:
+       if (cfg->clockevent_rating)
+               pm_runtime_irq_safe(&pdev->dev);
+       else
+               pm_runtime_idle(&pdev->dev);
+
+       return 0;
 }
 
 static int __devexit sh_mtu2_remove(struct platform_device *pdev)
index c1b51d49d106e90d8f4927cf174983878ad58b2a..0cc4add882795ed048e5b98a9e215a758b8ad147 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
 
 struct sh_tmu_priv {
        void __iomem *mapbase;
@@ -43,6 +44,8 @@ struct sh_tmu_priv {
        unsigned long periodic;
        struct clock_event_device ced;
        struct clocksource cs;
+       bool cs_enabled;
+       unsigned int enable_count;
 };
 
 static DEFINE_RAW_SPINLOCK(sh_tmu_lock);
@@ -107,7 +110,7 @@ static void sh_tmu_start_stop_ch(struct sh_tmu_priv *p, int start)
        raw_spin_unlock_irqrestore(&sh_tmu_lock, flags);
 }
 
-static int sh_tmu_enable(struct sh_tmu_priv *p)
+static int __sh_tmu_enable(struct sh_tmu_priv *p)
 {
        int ret;
 
@@ -135,7 +138,18 @@ static int sh_tmu_enable(struct sh_tmu_priv *p)
        return 0;
 }
 
-static void sh_tmu_disable(struct sh_tmu_priv *p)
+static int sh_tmu_enable(struct sh_tmu_priv *p)
+{
+       if (p->enable_count++ > 0)
+               return 0;
+
+       pm_runtime_get_sync(&p->pdev->dev);
+       dev_pm_syscore_device(&p->pdev->dev, true);
+
+       return __sh_tmu_enable(p);
+}
+
+static void __sh_tmu_disable(struct sh_tmu_priv *p)
 {
        /* disable channel */
        sh_tmu_start_stop_ch(p, 0);
@@ -147,6 +161,20 @@ static void sh_tmu_disable(struct sh_tmu_priv *p)
        clk_disable(p->clk);
 }
 
+static void sh_tmu_disable(struct sh_tmu_priv *p)
+{
+       if (WARN_ON(p->enable_count == 0))
+               return;
+
+       if (--p->enable_count > 0)
+               return;
+
+       __sh_tmu_disable(p);
+
+       dev_pm_syscore_device(&p->pdev->dev, false);
+       pm_runtime_put(&p->pdev->dev);
+}
+
 static void sh_tmu_set_next(struct sh_tmu_priv *p, unsigned long delta,
                            int periodic)
 {
@@ -203,15 +231,53 @@ static int sh_tmu_clocksource_enable(struct clocksource *cs)
        struct sh_tmu_priv *p = cs_to_sh_tmu(cs);
        int ret;
 
+       if (WARN_ON(p->cs_enabled))
+               return 0;
+
        ret = sh_tmu_enable(p);
-       if (!ret)
+       if (!ret) {
                __clocksource_updatefreq_hz(cs, p->rate);
+               p->cs_enabled = true;
+       }
+
        return ret;
 }
 
 static void sh_tmu_clocksource_disable(struct clocksource *cs)
 {
-       sh_tmu_disable(cs_to_sh_tmu(cs));
+       struct sh_tmu_priv *p = cs_to_sh_tmu(cs);
+
+       if (WARN_ON(!p->cs_enabled))
+               return;
+
+       sh_tmu_disable(p);
+       p->cs_enabled = false;
+}
+
+static void sh_tmu_clocksource_suspend(struct clocksource *cs)
+{
+       struct sh_tmu_priv *p = cs_to_sh_tmu(cs);
+
+       if (!p->cs_enabled)
+               return;
+
+       if (--p->enable_count == 0) {
+               __sh_tmu_disable(p);
+               pm_genpd_syscore_poweroff(&p->pdev->dev);
+       }
+}
+
+static void sh_tmu_clocksource_resume(struct clocksource *cs)
+{
+       struct sh_tmu_priv *p = cs_to_sh_tmu(cs);
+
+       if (!p->cs_enabled)
+               return;
+
+       if (p->enable_count++ == 0) {
+               pm_genpd_syscore_poweron(&p->pdev->dev);
+               __sh_tmu_enable(p);
+       }
 }
 
 static int sh_tmu_register_clocksource(struct sh_tmu_priv *p,
@@ -224,6 +290,8 @@ static int sh_tmu_register_clocksource(struct sh_tmu_priv *p,
        cs->read = sh_tmu_clocksource_read;
        cs->enable = sh_tmu_clocksource_enable;
        cs->disable = sh_tmu_clocksource_disable;
+       cs->suspend = sh_tmu_clocksource_suspend;
+       cs->resume = sh_tmu_clocksource_resume;
        cs->mask = CLOCKSOURCE_MASK(32);
        cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
 
@@ -301,6 +369,16 @@ static int sh_tmu_clock_event_next(unsigned long delta,
        return 0;
 }
 
+static void sh_tmu_clock_event_suspend(struct clock_event_device *ced)
+{
+       pm_genpd_syscore_poweroff(&ced_to_sh_tmu(ced)->pdev->dev);
+}
+
+static void sh_tmu_clock_event_resume(struct clock_event_device *ced)
+{
+       pm_genpd_syscore_poweron(&ced_to_sh_tmu(ced)->pdev->dev);
+}
+
 static void sh_tmu_register_clockevent(struct sh_tmu_priv *p,
                                       char *name, unsigned long rating)
 {
@@ -316,6 +394,8 @@ static void sh_tmu_register_clockevent(struct sh_tmu_priv *p,
        ced->cpumask = cpumask_of(0);
        ced->set_next_event = sh_tmu_clock_event_next;
        ced->set_mode = sh_tmu_clock_event_mode;
+       ced->suspend = sh_tmu_clock_event_suspend;
+       ced->resume = sh_tmu_clock_event_resume;
 
        dev_info(&p->pdev->dev, "used for clock events\n");
 
@@ -392,6 +472,8 @@ static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev)
                ret = PTR_ERR(p->clk);
                goto err1;
        }
+       p->cs_enabled = false;
+       p->enable_count = 0;
 
        return sh_tmu_register(p, (char *)dev_name(&p->pdev->dev),
                               cfg->clockevent_rating,
@@ -405,14 +487,17 @@ static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev)
 static int __devinit sh_tmu_probe(struct platform_device *pdev)
 {
        struct sh_tmu_priv *p = platform_get_drvdata(pdev);
+       struct sh_timer_config *cfg = pdev->dev.platform_data;
        int ret;
 
-       if (!is_early_platform_device(pdev))
-               pm_genpd_dev_always_on(&pdev->dev, true);
+       if (!is_early_platform_device(pdev)) {
+               pm_runtime_set_active(&pdev->dev);
+               pm_runtime_enable(&pdev->dev);
+       }
 
        if (p) {
                dev_info(&pdev->dev, "kept as earlytimer\n");
-               return 0;
+               goto out;
        }
 
        p = kmalloc(sizeof(*p), GFP_KERNEL);
@@ -425,8 +510,19 @@ static int __devinit sh_tmu_probe(struct platform_device *pdev)
        if (ret) {
                kfree(p);
                platform_set_drvdata(pdev, NULL);
+               pm_runtime_idle(&pdev->dev);
+               return ret;
        }
-       return ret;
+       if (is_early_platform_device(pdev))
+               return 0;
+
+ out:
+       if (cfg->clockevent_rating || cfg->clocksource_rating)
+               pm_runtime_irq_safe(&pdev->dev);
+       else
+               pm_runtime_idle(&pdev->dev);
+
+       return 0;
 }
 
 static int __devexit sh_tmu_remove(struct platform_device *pdev)
index e24a2a1b666661aab161c508bc6696bd3fe7979f..ea512f47b789b49c842a78388f15957fb65d62be 100644 (file)
@@ -179,6 +179,17 @@ config CPU_FREQ_GOV_CONSERVATIVE
 
          If in doubt, say N.
 
+config GENERIC_CPUFREQ_CPU0
+       bool "Generic CPU0 cpufreq driver"
+       depends on HAVE_CLK && REGULATOR && PM_OPP && OF
+       select CPU_FREQ_TABLE
+       help
+         This adds a generic cpufreq driver for CPU0 frequency management.
+         It supports both uniprocessor (UP) and symmetric multiprocessor (SMP)
+         systems in which all CPUs share a common clock and voltage.
+
+         If in doubt, say N.
+
 menu "x86 CPU frequency scaling drivers"
 depends on X86
 source "drivers/cpufreq/Kconfig.x86"
index 78ff7ee48951b6d6d488f0da4d40e47929608f24..934854ae5eb4a436205d644c76c8011a5c41de3e 100644 (file)
@@ -23,7 +23,8 @@ config X86_ACPI_CPUFREQ
        help
          This driver adds a CPUFreq driver which utilizes the ACPI
          Processor Performance States.
-         This driver also supports Intel Enhanced Speedstep.
+         This driver also supports Intel Enhanced Speedstep and newer
+         AMD CPUs.
 
          To compile this driver as a module, choose M here: the
          module will be called acpi-cpufreq.
@@ -32,6 +33,18 @@ config X86_ACPI_CPUFREQ
 
          If in doubt, say N.
 
+config X86_ACPI_CPUFREQ_CPB
+       default y
+       bool "Legacy cpb sysfs knob support for AMD CPUs"
+       depends on X86_ACPI_CPUFREQ && CPU_SUP_AMD
+       help
+         The powernow-k8 driver used to provide a sysfs knob called "cpb"
+         to disable the Core Performance Boosting feature of AMD CPUs. This
+         file has now been superseded by the more generic "boost" entry.
+
+         By enabling this option the acpi_cpufreq driver provides the old
+         entry in addition to the new boost ones, for compatibility reasons.
+
 config ELAN_CPUFREQ
        tristate "AMD Elan SC400 and SC410"
        select CPU_FREQ_TABLE
@@ -95,7 +108,8 @@ config X86_POWERNOW_K8
        select CPU_FREQ_TABLE
        depends on ACPI && ACPI_PROCESSOR
        help
-         This adds the CPUFreq driver for K8/K10 Opteron/Athlon64 processors.
+         This adds the CPUFreq driver for K8/early Opteron/Athlon64 processors.
+         Support for K10 and newer processors is now in acpi-cpufreq.
 
          To compile this driver as a module, choose M here: the
          module will be called powernow-k8.
index 9531fc2eda22d15591f8ce867950d84a6cadbca9..1bc90e1306d88e0582b76c83abf6bfe98983ea35 100644 (file)
@@ -13,13 +13,15 @@ obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE)     += cpufreq_conservative.o
 # CPUfreq cross-arch helpers
 obj-$(CONFIG_CPU_FREQ_TABLE)           += freq_table.o
 
+obj-$(CONFIG_GENERIC_CPUFREQ_CPU0)     += cpufreq-cpu0.o
+
 ##################################################################################
 # x86 drivers.
 # Link order matters. K8 is preferred to ACPI because of firmware bugs in early
 # K8 systems. ACPI is preferred to all other hardware-specific drivers.
 # speedstep-* is preferred over p4-clockmod.
 
-obj-$(CONFIG_X86_POWERNOW_K8)          += powernow-k8.o mperf.o
+obj-$(CONFIG_X86_POWERNOW_K8)          += powernow-k8.o
 obj-$(CONFIG_X86_ACPI_CPUFREQ)         += acpi-cpufreq.o mperf.o
 obj-$(CONFIG_X86_PCC_CPUFREQ)          += pcc-cpufreq.o
 obj-$(CONFIG_X86_POWERNOW_K6)          += powernow-k6.o
index 56c6c6b4eb4d61043bfdd9b61a95e71ee34f424a..0d048f6a2b23a3bf9476a658cac846ccd321bd3c 100644 (file)
@@ -51,13 +51,19 @@ MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
 MODULE_DESCRIPTION("ACPI Processor P-States Driver");
 MODULE_LICENSE("GPL");
 
+#define PFX "acpi-cpufreq: "
+
 enum {
        UNDEFINED_CAPABLE = 0,
        SYSTEM_INTEL_MSR_CAPABLE,
+       SYSTEM_AMD_MSR_CAPABLE,
        SYSTEM_IO_CAPABLE,
 };
 
 #define INTEL_MSR_RANGE                (0xffff)
+#define AMD_MSR_RANGE          (0x7)
+
+#define MSR_K7_HWCR_CPB_DIS    (1ULL << 25)
 
 struct acpi_cpufreq_data {
        struct acpi_processor_performance *acpi_data;
@@ -74,6 +80,116 @@ static struct acpi_processor_performance __percpu *acpi_perf_data;
 static struct cpufreq_driver acpi_cpufreq_driver;
 
 static unsigned int acpi_pstate_strict;
+static bool boost_enabled, boost_supported;
+static struct msr __percpu *msrs;
+
+static bool boost_state(unsigned int cpu)
+{
+       u32 lo, hi;
+       u64 msr;
+
+       switch (boot_cpu_data.x86_vendor) {
+       case X86_VENDOR_INTEL:
+               rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi);
+               msr = lo | ((u64)hi << 32);
+               return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
+       case X86_VENDOR_AMD:
+               rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
+               msr = lo | ((u64)hi << 32);
+               return !(msr & MSR_K7_HWCR_CPB_DIS);
+       }
+       return false;
+}
+
+static void boost_set_msrs(bool enable, const struct cpumask *cpumask)
+{
+       u32 cpu;
+       u32 msr_addr;
+       u64 msr_mask;
+
+       switch (boot_cpu_data.x86_vendor) {
+       case X86_VENDOR_INTEL:
+               msr_addr = MSR_IA32_MISC_ENABLE;
+               msr_mask = MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
+               break;
+       case X86_VENDOR_AMD:
+               msr_addr = MSR_K7_HWCR;
+               msr_mask = MSR_K7_HWCR_CPB_DIS;
+               break;
+       default:
+               return;
+       }
+
+       rdmsr_on_cpus(cpumask, msr_addr, msrs);
+
+       for_each_cpu(cpu, cpumask) {
+               struct msr *reg = per_cpu_ptr(msrs, cpu);
+               if (enable)
+                       reg->q &= ~msr_mask;
+               else
+                       reg->q |= msr_mask;
+       }
+
+       wrmsr_on_cpus(cpumask, msr_addr, msrs);
+}
+
+static ssize_t _store_boost(const char *buf, size_t count)
+{
+       int ret;
+       unsigned long val = 0;
+
+       if (!boost_supported)
+               return -EINVAL;
+
+       ret = kstrtoul(buf, 10, &val);
+       if (ret || (val > 1))
+               return -EINVAL;
+
+       if ((val && boost_enabled) || (!val && !boost_enabled))
+               return count;
+
+       get_online_cpus();
+
+       boost_set_msrs(val, cpu_online_mask);
+
+       put_online_cpus();
+
+       boost_enabled = val;
+       pr_debug("Core Boosting %sabled.\n", val ? "en" : "dis");
+
+       return count;
+}
+
+static ssize_t store_global_boost(struct kobject *kobj, struct attribute *attr,
+                                 const char *buf, size_t count)
+{
+       return _store_boost(buf, count);
+}
+
+static ssize_t show_global_boost(struct kobject *kobj,
+                                struct attribute *attr, char *buf)
+{
+       return sprintf(buf, "%u\n", boost_enabled);
+}
+
+static struct global_attr global_boost = __ATTR(boost, 0644,
+                                               show_global_boost,
+                                               store_global_boost);
+
+#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
+static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf,
+                        size_t count)
+{
+       return _store_boost(buf, count);
+}
+
+static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf)
+{
+       return sprintf(buf, "%u\n", boost_enabled);
+}
+
+static struct freq_attr cpb = __ATTR(cpb, 0644, show_cpb, store_cpb);
+#endif
 
 static int check_est_cpu(unsigned int cpuid)
 {
@@ -82,6 +198,13 @@ static int check_est_cpu(unsigned int cpuid)
        return cpu_has(cpu, X86_FEATURE_EST);
 }
 
+static int check_amd_hwpstate_cpu(unsigned int cpuid)
+{
+       struct cpuinfo_x86 *cpu = &cpu_data(cpuid);
+
+       return cpu_has(cpu, X86_FEATURE_HW_PSTATE);
+}
+
 static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
 {
        struct acpi_processor_performance *perf;
@@ -101,7 +224,11 @@ static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
        int i;
        struct acpi_processor_performance *perf;
 
-       msr &= INTEL_MSR_RANGE;
+       if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+               msr &= AMD_MSR_RANGE;
+       else
+               msr &= INTEL_MSR_RANGE;
+
        perf = data->acpi_data;
 
        for (i = 0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
@@ -115,6 +242,7 @@ static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data)
 {
        switch (data->cpu_feature) {
        case SYSTEM_INTEL_MSR_CAPABLE:
+       case SYSTEM_AMD_MSR_CAPABLE:
                return extract_msr(val, data);
        case SYSTEM_IO_CAPABLE:
                return extract_io(val, data);
@@ -150,6 +278,7 @@ static void do_drv_read(void *_cmd)
 
        switch (cmd->type) {
        case SYSTEM_INTEL_MSR_CAPABLE:
+       case SYSTEM_AMD_MSR_CAPABLE:
                rdmsr(cmd->addr.msr.reg, cmd->val, h);
                break;
        case SYSTEM_IO_CAPABLE:
@@ -174,6 +303,9 @@ static void do_drv_write(void *_cmd)
                lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE);
                wrmsr(cmd->addr.msr.reg, lo, hi);
                break;
+       case SYSTEM_AMD_MSR_CAPABLE:
+               wrmsr(cmd->addr.msr.reg, cmd->val, 0);
+               break;
        case SYSTEM_IO_CAPABLE:
                acpi_os_write_port((acpi_io_address)cmd->addr.io.port,
                                cmd->val,
@@ -217,6 +349,10 @@ static u32 get_cur_val(const struct cpumask *mask)
                cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
                cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
                break;
+       case SYSTEM_AMD_MSR_CAPABLE:
+               cmd.type = SYSTEM_AMD_MSR_CAPABLE;
+               cmd.addr.msr.reg = MSR_AMD_PERF_STATUS;
+               break;
        case SYSTEM_IO_CAPABLE:
                cmd.type = SYSTEM_IO_CAPABLE;
                perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data;
@@ -326,6 +462,11 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
                cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
                cmd.val = (u32) perf->states[next_perf_state].control;
                break;
+       case SYSTEM_AMD_MSR_CAPABLE:
+               cmd.type = SYSTEM_AMD_MSR_CAPABLE;
+               cmd.addr.msr.reg = MSR_AMD_PERF_CTL;
+               cmd.val = (u32) perf->states[next_perf_state].control;
+               break;
        case SYSTEM_IO_CAPABLE:
                cmd.type = SYSTEM_IO_CAPABLE;
                cmd.addr.io.port = perf->control_register.address;
@@ -419,6 +560,44 @@ static void free_acpi_perf_data(void)
        free_percpu(acpi_perf_data);
 }
 
+static int boost_notify(struct notifier_block *nb, unsigned long action,
+                     void *hcpu)
+{
+       unsigned cpu = (long)hcpu;
+       const struct cpumask *cpumask;
+
+       cpumask = get_cpu_mask(cpu);
+
+       /*
+        * Clear the boost-disable bit on the CPU_DOWN path so that
+        * this cpu cannot block the remaining ones from boosting. On
+        * the CPU_UP path we simply keep the boost-disable flag in
+        * sync with the current global state.
+        */
+
+       switch (action) {
+       case CPU_UP_PREPARE:
+       case CPU_UP_PREPARE_FROZEN:
+               boost_set_msrs(boost_enabled, cpumask);
+               break;
+
+       case CPU_DOWN_PREPARE:
+       case CPU_DOWN_PREPARE_FROZEN:
+               boost_set_msrs(1, cpumask);
+               break;
+
+       default:
+               break;
+       }
+
+       return NOTIFY_OK;
+}
+
+
+static struct notifier_block boost_nb = {
+       .notifier_call          = boost_notify,
+};
+
 /*
  * acpi_cpufreq_early_init - initialize ACPI P-States library
  *
@@ -559,6 +738,14 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
                policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
                cpumask_copy(policy->cpus, cpu_core_mask(cpu));
        }
+
+       if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) {
+               cpumask_clear(policy->cpus);
+               cpumask_set_cpu(cpu, policy->cpus);
+               cpumask_copy(policy->related_cpus, cpu_sibling_mask(cpu));
+               policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
+               pr_info_once(PFX "overriding BIOS provided _PSD data\n");
+       }
 #endif
 
        /* capability check */
@@ -580,12 +767,16 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
                break;
        case ACPI_ADR_SPACE_FIXED_HARDWARE:
                pr_debug("HARDWARE addr space\n");
-               if (!check_est_cpu(cpu)) {
-                       result = -ENODEV;
-                       goto err_unreg;
+               if (check_est_cpu(cpu)) {
+                       data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
+                       break;
                }
-               data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
-               break;
+               if (check_amd_hwpstate_cpu(cpu)) {
+                       data->cpu_feature = SYSTEM_AMD_MSR_CAPABLE;
+                       break;
+               }
+               result = -ENODEV;
+               goto err_unreg;
        default:
                pr_debug("Unknown addr space %d\n",
                        (u32) (perf->control_register.space_id));
@@ -718,6 +909,7 @@ static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
 
 static struct freq_attr *acpi_cpufreq_attr[] = {
        &cpufreq_freq_attr_scaling_available_freqs,
+       NULL,   /* this is a placeholder for cpb, do not remove */
        NULL,
 };
 
@@ -733,6 +925,49 @@ static struct cpufreq_driver acpi_cpufreq_driver = {
        .attr           = acpi_cpufreq_attr,
 };
 
+static void __init acpi_cpufreq_boost_init(void)
+{
+       if (boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA)) {
+               msrs = msrs_alloc();
+
+               if (!msrs)
+                       return;
+
+               boost_supported = true;
+               boost_enabled = boost_state(0);
+
+               get_online_cpus();
+
+               /* Force all MSRs to the same value */
+               boost_set_msrs(boost_enabled, cpu_online_mask);
+
+               register_cpu_notifier(&boost_nb);
+
+               put_online_cpus();
+       } else
+               global_boost.attr.mode = 0444;
+
+       /* We create the boost file in any case, though for systems without
+        * hardware support it will be read-only and hardwired to return 0.
+        */
+       if (sysfs_create_file(cpufreq_global_kobject, &(global_boost.attr)))
+               pr_warn(PFX "could not register global boost sysfs file\n");
+       else
+               pr_debug("registered global boost sysfs file\n");
+}
+
+static void __exit acpi_cpufreq_boost_exit(void)
+{
+       sysfs_remove_file(cpufreq_global_kobject, &(global_boost.attr));
+
+       if (msrs) {
+               unregister_cpu_notifier(&boost_nb);
+
+               msrs_free(msrs);
+               msrs = NULL;
+       }
+}
+
 static int __init acpi_cpufreq_init(void)
 {
        int ret;
@@ -746,9 +981,32 @@ static int __init acpi_cpufreq_init(void)
        if (ret)
                return ret;
 
+#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
+       /* this is a sysfs file with a strange name and an even stranger
+        * semantics - per-CPU instantiation, but system-global effect.
+        * Let's enable it only on AMD CPUs for compatibility reasons and
+        * only if configured. This is considered legacy code, which
+        * will probably be removed at some point in the future.
+        */
+       if (check_amd_hwpstate_cpu(0)) {
+               struct freq_attr **iter;
+
+               pr_debug("adding sysfs entry for cpb\n");
+
+               for (iter = acpi_cpufreq_attr; *iter != NULL; iter++)
+                       ;
+
+               /* make sure there is a terminator behind it */
+               if (iter[1] == NULL)
+                       *iter = &cpb;
+       }
+#endif
+
        ret = cpufreq_register_driver(&acpi_cpufreq_driver);
        if (ret)
                free_acpi_perf_data();
+       else
+               acpi_cpufreq_boost_init();
 
        return ret;
 }
@@ -757,6 +1015,8 @@ static void __exit acpi_cpufreq_exit(void)
 {
        pr_debug("acpi_cpufreq_exit\n");
 
+       acpi_cpufreq_boost_exit();
+
        cpufreq_unregister_driver(&acpi_cpufreq_driver);
 
        free_acpi_perf_data();
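
The hunks above wire up two knobs: a system-wide "boost" attribute on the global cpufreq kobject (read-only and reporting 0 on hardware without CPB/IDA) and, on AMD parts only, the legacy per-policy "cpb" attribute appended to acpi_cpufreq_attr at init time. Both funnel into _store_boost(). A minimal userspace sketch of driving the new knob follows; the sysfs path is an assumption based on cpufreq_global_kobject being exposed under /sys/devices/system/cpu/cpufreq/.

    /*
     * Sketch only, not part of the patch: toggle the global boost knob.
     * The path assumes the global cpufreq kobject sits at
     * /sys/devices/system/cpu/cpufreq/.
     */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    static int set_boost(int enable)
    {
            int fd = open("/sys/devices/system/cpu/cpufreq/boost", O_WRONLY);

            if (fd < 0)
                    return -1;
            if (write(fd, enable ? "1" : "0", 1) != 1) {
                    close(fd);
                    return -1;
            }
            return close(fd);
    }

    int main(void)
    {
            if (set_boost(0))
                    perror("boost");
            return 0;
    }

Writing 0 or 1 flips the boost-disable MSR bit on every online CPU via boost_set_msrs(), and the hotplug notifier keeps newly onlined CPUs in sync with the global state.
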
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
new file mode 100644 (file)
index 0000000..e915827
--- /dev/null
@@ -0,0 +1,269 @@
+/*
+ * Copyright (C) 2012 Freescale Semiconductor, Inc.
+ *
+ * The OPP code in function cpu0_set_target() is reused from
+ * drivers/cpufreq/omap-cpufreq.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt)    KBUILD_MODNAME ": " fmt
+
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/opp.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+static unsigned int transition_latency;
+static unsigned int voltage_tolerance; /* in percentage */
+
+static struct device *cpu_dev;
+static struct clk *cpu_clk;
+static struct regulator *cpu_reg;
+static struct cpufreq_frequency_table *freq_table;
+
+static int cpu0_verify_speed(struct cpufreq_policy *policy)
+{
+       return cpufreq_frequency_table_verify(policy, freq_table);
+}
+
+static unsigned int cpu0_get_speed(unsigned int cpu)
+{
+       return clk_get_rate(cpu_clk) / 1000;
+}
+
+static int cpu0_set_target(struct cpufreq_policy *policy,
+                          unsigned int target_freq, unsigned int relation)
+{
+       struct cpufreq_freqs freqs;
+       struct opp *opp;
+       unsigned long freq_Hz, volt = 0, volt_old = 0, tol = 0;
+       unsigned int index, cpu;
+       int ret;
+
+       ret = cpufreq_frequency_table_target(policy, freq_table, target_freq,
+                                            relation, &index);
+       if (ret) {
+               pr_err("failed to match target freqency %d: %d\n",
+                      target_freq, ret);
+               return ret;
+       }
+
+       freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000);
+       if (freq_Hz < 0)
+               freq_Hz = freq_table[index].frequency * 1000;
+       freqs.new = freq_Hz / 1000;
+       freqs.old = clk_get_rate(cpu_clk) / 1000;
+
+       if (freqs.old == freqs.new)
+               return 0;
+
+       for_each_online_cpu(cpu) {
+               freqs.cpu = cpu;
+               cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+       }
+
+       if (cpu_reg) {
+               opp = opp_find_freq_ceil(cpu_dev, &freq_Hz);
+               if (IS_ERR(opp)) {
+                       pr_err("failed to find OPP for %ld\n", freq_Hz);
+                       return PTR_ERR(opp);
+               }
+               volt = opp_get_voltage(opp);
+               tol = volt * voltage_tolerance / 100;
+               volt_old = regulator_get_voltage(cpu_reg);
+       }
+
+       pr_debug("%u MHz, %ld mV --> %u MHz, %ld mV\n",
+                freqs.old / 1000, volt_old ? volt_old / 1000 : -1,
+                freqs.new / 1000, volt ? volt / 1000 : -1);
+
+       /* scaling up?  scale voltage before frequency */
+       if (cpu_reg && freqs.new > freqs.old) {
+               ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
+               if (ret) {
+                       pr_err("failed to scale voltage up: %d\n", ret);
+                       freqs.new = freqs.old;
+                       return ret;
+               }
+       }
+
+       ret = clk_set_rate(cpu_clk, freqs.new * 1000);
+       if (ret) {
+               pr_err("failed to set clock rate: %d\n", ret);
+               if (cpu_reg)
+                       regulator_set_voltage_tol(cpu_reg, volt_old, tol);
+               return ret;
+       }
+
+       /* scaling down?  scale voltage after frequency */
+       if (cpu_reg && freqs.new < freqs.old) {
+               ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
+               if (ret) {
+                       pr_err("failed to scale voltage down: %d\n", ret);
+                       clk_set_rate(cpu_clk, freqs.old * 1000);
+                       freqs.new = freqs.old;
+                       return ret;
+               }
+       }
+
+       for_each_online_cpu(cpu) {
+               freqs.cpu = cpu;
+               cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+       }
+
+       return 0;
+}
+
+static int cpu0_cpufreq_init(struct cpufreq_policy *policy)
+{
+       int ret;
+
+       if (policy->cpu != 0)
+               return -EINVAL;
+
+       ret = cpufreq_frequency_table_cpuinfo(policy, freq_table);
+       if (ret) {
+               pr_err("invalid frequency table: %d\n", ret);
+               return ret;
+       }
+
+       policy->cpuinfo.transition_latency = transition_latency;
+       policy->cur = clk_get_rate(cpu_clk) / 1000;
+
+       /*
+        * The driver only supports the SMP configuration where all processors
+        * share the same clock and voltage.  Use cpufreq affected_cpus
+        * interface to have all CPUs scaled together.
+        */
+       policy->shared_type = CPUFREQ_SHARED_TYPE_ANY;
+       cpumask_setall(policy->cpus);
+
+       cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
+
+       return 0;
+}
+
+static int cpu0_cpufreq_exit(struct cpufreq_policy *policy)
+{
+       cpufreq_frequency_table_put_attr(policy->cpu);
+
+       return 0;
+}
+
+static struct freq_attr *cpu0_cpufreq_attr[] = {
+       &cpufreq_freq_attr_scaling_available_freqs,
+       NULL,
+};
+
+static struct cpufreq_driver cpu0_cpufreq_driver = {
+       .flags = CPUFREQ_STICKY,
+       .verify = cpu0_verify_speed,
+       .target = cpu0_set_target,
+       .get = cpu0_get_speed,
+       .init = cpu0_cpufreq_init,
+       .exit = cpu0_cpufreq_exit,
+       .name = "generic_cpu0",
+       .attr = cpu0_cpufreq_attr,
+};
+
+static int __devinit cpu0_cpufreq_driver_init(void)
+{
+       struct device_node *np;
+       int ret;
+
+       np = of_find_node_by_path("/cpus/cpu@0");
+       if (!np) {
+               pr_err("failed to find cpu0 node\n");
+               return -ENOENT;
+       }
+
+       cpu_dev = get_cpu_device(0);
+       if (!cpu_dev) {
+               pr_err("failed to get cpu0 device\n");
+               ret = -ENODEV;
+               goto out_put_node;
+       }
+
+       cpu_dev->of_node = np;
+
+       cpu_clk = clk_get(cpu_dev, NULL);
+       if (IS_ERR(cpu_clk)) {
+               ret = PTR_ERR(cpu_clk);
+               pr_err("failed to get cpu0 clock: %d\n", ret);
+               goto out_put_node;
+       }
+
+       cpu_reg = regulator_get(cpu_dev, "cpu0");
+       if (IS_ERR(cpu_reg)) {
+               pr_warn("failed to get cpu0 regulator\n");
+               cpu_reg = NULL;
+       }
+
+       ret = of_init_opp_table(cpu_dev);
+       if (ret) {
+               pr_err("failed to init OPP table: %d\n", ret);
+               goto out_put_node;
+       }
+
+       ret = opp_init_cpufreq_table(cpu_dev, &freq_table);
+       if (ret) {
+               pr_err("failed to init cpufreq table: %d\n", ret);
+               goto out_put_node;
+       }
+
+       of_property_read_u32(np, "voltage-tolerance", &voltage_tolerance);
+
+       if (of_property_read_u32(np, "clock-latency", &transition_latency))
+               transition_latency = CPUFREQ_ETERNAL;
+
+       if (cpu_reg) {
+               struct opp *opp;
+               unsigned long min_uV, max_uV;
+               int i;
+
+               /*
+                * OPP is maintained in order of increasing frequency, and
+                * freq_table initialised from OPP is therefore sorted in the
+                * same order.
+                */
+               for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
+                       ;
+               opp = opp_find_freq_exact(cpu_dev,
+                               freq_table[0].frequency * 1000, true);
+               min_uV = opp_get_voltage(opp);
+               opp = opp_find_freq_exact(cpu_dev,
+                               freq_table[i-1].frequency * 1000, true);
+               max_uV = opp_get_voltage(opp);
+               ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV);
+               if (ret > 0)
+                       transition_latency += ret * 1000;
+       }
+
+       ret = cpufreq_register_driver(&cpu0_cpufreq_driver);
+       if (ret) {
+               pr_err("failed register driver: %d\n", ret);
+               goto out_free_table;
+       }
+
+       of_node_put(np);
+       return 0;
+
+out_free_table:
+       opp_free_cpufreq_table(cpu_dev, &freq_table);
+out_put_node:
+       of_node_put(np);
+       return ret;
+}
+late_initcall(cpu0_cpufreq_driver_init);
+
+MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
+MODULE_DESCRIPTION("Generic CPU0 cpufreq driver");
+MODULE_LICENSE("GPL");
index 235a340e81f20bcb63805f9c911f4fd99eafeab8..b75dc2c2f8d3f5e7aa8268579c7f515c1edff029 100644 (file)
@@ -504,6 +504,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                                j_dbs_info->prev_cpu_nice =
                                                kcpustat_cpu(j).cpustat[CPUTIME_NICE];
                }
+               this_dbs_info->cpu = cpu;
                this_dbs_info->down_skip = 0;
                this_dbs_info->requested_freq = policy->cur;
 
@@ -583,6 +584,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                        __cpufreq_driver_target(
                                        this_dbs_info->cur_policy,
                                        policy->min, CPUFREQ_RELATION_L);
+               dbs_check_cpu(this_dbs_info);
                mutex_unlock(&this_dbs_info->timer_mutex);
 
                break;
index 836e9b062e5ec4a2e08c935d4c49f17a79ef124b..9479fb33c30fd6e7eb303b9b09dd3afa543f2df0 100644 (file)
@@ -761,6 +761,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                else if (policy->min > this_dbs_info->cur_policy->cur)
                        __cpufreq_driver_target(this_dbs_info->cur_policy,
                                policy->min, CPUFREQ_RELATION_L);
+               dbs_check_cpu(this_dbs_info);
                mutex_unlock(&this_dbs_info->timer_mutex);
                break;
        }
index cbf48fbca881f534f3eec4eba81d97f788115eac..e2dc436099d1006ed9d879cc802eedda7eae9deb 100644 (file)
@@ -56,7 +56,7 @@ union msr_longhaul {
 /*
  * VIA C3 Samuel 1  & Samuel 2 (stepping 0)
  */
-static const int __cpuinitdata samuel1_mults[16] = {
+static const int __cpuinitconst samuel1_mults[16] = {
        -1, /* 0000 -> RESERVED */
        30, /* 0001 ->  3.0x */
        40, /* 0010 ->  4.0x */
@@ -75,7 +75,7 @@ static const int __cpuinitdata samuel1_mults[16] = {
        -1, /* 1111 -> RESERVED */
 };
 
-static const int __cpuinitdata samuel1_eblcr[16] = {
+static const int __cpuinitconst samuel1_eblcr[16] = {
        50, /* 0000 -> RESERVED */
        30, /* 0001 ->  3.0x */
        40, /* 0010 ->  4.0x */
@@ -97,7 +97,7 @@ static const int __cpuinitdata samuel1_eblcr[16] = {
 /*
  * VIA C3 Samuel2 Stepping 1->15
  */
-static const int __cpuinitdata samuel2_eblcr[16] = {
+static const int __cpuinitconst samuel2_eblcr[16] = {
        50,  /* 0000 ->  5.0x */
        30,  /* 0001 ->  3.0x */
        40,  /* 0010 ->  4.0x */
@@ -119,7 +119,7 @@ static const int __cpuinitdata samuel2_eblcr[16] = {
 /*
  * VIA C3 Ezra
  */
-static const int __cpuinitdata ezra_mults[16] = {
+static const int __cpuinitconst ezra_mults[16] = {
        100, /* 0000 -> 10.0x */
        30,  /* 0001 ->  3.0x */
        40,  /* 0010 ->  4.0x */
@@ -138,7 +138,7 @@ static const int __cpuinitdata ezra_mults[16] = {
        120, /* 1111 -> 12.0x */
 };
 
-static const int __cpuinitdata ezra_eblcr[16] = {
+static const int __cpuinitconst ezra_eblcr[16] = {
        50,  /* 0000 ->  5.0x */
        30,  /* 0001 ->  3.0x */
        40,  /* 0010 ->  4.0x */
@@ -160,7 +160,7 @@ static const int __cpuinitdata ezra_eblcr[16] = {
 /*
  * VIA C3 (Ezra-T) [C5M].
  */
-static const int __cpuinitdata ezrat_mults[32] = {
+static const int __cpuinitconst ezrat_mults[32] = {
        100, /* 0000 -> 10.0x */
        30,  /* 0001 ->  3.0x */
        40,  /* 0010 ->  4.0x */
@@ -196,7 +196,7 @@ static const int __cpuinitdata ezrat_mults[32] = {
        -1,  /* 1111 -> RESERVED (12.0x) */
 };
 
-static const int __cpuinitdata ezrat_eblcr[32] = {
+static const int __cpuinitconst ezrat_eblcr[32] = {
        50,  /* 0000 ->  5.0x */
        30,  /* 0001 ->  3.0x */
        40,  /* 0010 ->  4.0x */
@@ -235,7 +235,7 @@ static const int __cpuinitdata ezrat_eblcr[32] = {
 /*
  * VIA C3 Nehemiah */
 
-static const int __cpuinitdata nehemiah_mults[32] = {
+static const int __cpuinitconst nehemiah_mults[32] = {
        100, /* 0000 -> 10.0x */
        -1, /* 0001 -> 16.0x */
        40,  /* 0010 ->  4.0x */
@@ -270,7 +270,7 @@ static const int __cpuinitdata nehemiah_mults[32] = {
        -1, /* 1111 -> 12.0x */
 };
 
-static const int __cpuinitdata nehemiah_eblcr[32] = {
+static const int __cpuinitconst nehemiah_eblcr[32] = {
        50,  /* 0000 ->  5.0x */
        160, /* 0001 -> 16.0x */
        40,  /* 0010 ->  4.0x */
@@ -315,7 +315,7 @@ struct mV_pos {
        unsigned short pos;
 };
 
-static const struct mV_pos __cpuinitdata vrm85_mV[32] = {
+static const struct mV_pos __cpuinitconst vrm85_mV[32] = {
        {1250, 8},      {1200, 6},      {1150, 4},      {1100, 2},
        {1050, 0},      {1800, 30},     {1750, 28},     {1700, 26},
        {1650, 24},     {1600, 22},     {1550, 20},     {1500, 18},
@@ -326,14 +326,14 @@ static const struct mV_pos __cpuinitdata vrm85_mV[32] = {
        {1475, 17},     {1425, 15},     {1375, 13},     {1325, 11}
 };
 
-static const unsigned char __cpuinitdata mV_vrm85[32] = {
+static const unsigned char __cpuinitconst mV_vrm85[32] = {
        0x04,   0x14,   0x03,   0x13,   0x02,   0x12,   0x01,   0x11,
        0x00,   0x10,   0x0f,   0x1f,   0x0e,   0x1e,   0x0d,   0x1d,
        0x0c,   0x1c,   0x0b,   0x1b,   0x0a,   0x1a,   0x09,   0x19,
        0x08,   0x18,   0x07,   0x17,   0x06,   0x16,   0x05,   0x15
 };
 
-static const struct mV_pos __cpuinitdata mobilevrm_mV[32] = {
+static const struct mV_pos __cpuinitconst mobilevrm_mV[32] = {
        {1750, 31},     {1700, 30},     {1650, 29},     {1600, 28},
        {1550, 27},     {1500, 26},     {1450, 25},     {1400, 24},
        {1350, 23},     {1300, 22},     {1250, 21},     {1200, 20},
@@ -344,7 +344,7 @@ static const struct mV_pos __cpuinitdata mobilevrm_mV[32] = {
        {675, 3},       {650, 2},       {625, 1},       {600, 0}
 };
 
-static const unsigned char __cpuinitdata mV_mobilevrm[32] = {
+static const unsigned char __cpuinitconst mV_mobilevrm[32] = {
        0x1f,   0x1e,   0x1d,   0x1c,   0x1b,   0x1a,   0x19,   0x18,
        0x17,   0x16,   0x15,   0x14,   0x13,   0x12,   0x11,   0x10,
        0x0f,   0x0e,   0x0d,   0x0c,   0x0b,   0x0a,   0x09,   0x08,
index b47034e650a579b9d0263f8d0a03af2ecc8b3aa7..65f8e9a54975e8274c9de6403718f5ff39ee2e85 100644 (file)
 /* OPP tolerance in percentage */
 #define        OPP_TOLERANCE   4
 
-#ifdef CONFIG_SMP
-struct lpj_info {
-       unsigned long   ref;
-       unsigned int    freq;
-};
-
-static DEFINE_PER_CPU(struct lpj_info, lpj_ref);
-static struct lpj_info global_lpj_ref;
-#endif
-
 static struct cpufreq_frequency_table *freq_table;
 static atomic_t freq_table_users = ATOMIC_INIT(0);
 static struct clk *mpu_clk;
@@ -161,31 +151,6 @@ static int omap_target(struct cpufreq_policy *policy,
        }
 
        freqs.new = omap_getspeed(policy->cpu);
-#ifdef CONFIG_SMP
-       /*
-        * Note that loops_per_jiffy is not updated on SMP systems in
-        * cpufreq driver. So, update the per-CPU loops_per_jiffy value
-        * on frequency transition. We need to update all dependent CPUs.
-        */
-       for_each_cpu(i, policy->cpus) {
-               struct lpj_info *lpj = &per_cpu(lpj_ref, i);
-               if (!lpj->freq) {
-                       lpj->ref = per_cpu(cpu_data, i).loops_per_jiffy;
-                       lpj->freq = freqs.old;
-               }
-
-               per_cpu(cpu_data, i).loops_per_jiffy =
-                       cpufreq_scale(lpj->ref, lpj->freq, freqs.new);
-       }
-
-       /* And don't forget to adjust the global one */
-       if (!global_lpj_ref.freq) {
-               global_lpj_ref.ref = loops_per_jiffy;
-               global_lpj_ref.freq = freqs.old;
-       }
-       loops_per_jiffy = cpufreq_scale(global_lpj_ref.ref, global_lpj_ref.freq,
-                                       freqs.new);
-#endif
 
 done:
        /* notifiers */
@@ -301,9 +266,9 @@ static int __init omap_cpufreq_init(void)
        }
 
        mpu_dev = omap_device_get_by_hwmod_name("mpu");
-       if (!mpu_dev) {
+       if (IS_ERR(mpu_dev)) {
                pr_warning("%s: unable to get the mpu device\n", __func__);
-               return -EINVAL;
+               return PTR_ERR(mpu_dev);
        }
 
        mpu_reg = regulator_get(mpu_dev, "vcc");
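
For reference, the per-CPU loops_per_jiffy rescaling removed above was plain proportional arithmetic: the lpj reference captured at the first transition is scaled by the ratio of the new frequency to the reference frequency. A minimal sketch of that calculation follows; the helper name is invented, and the kernel's cpufreq_scale() does roughly the same with 32-bit overflow handling.

    /*
     * Made-up helper mirroring what the removed block computed via
     * cpufreq_scale(ref, ref_freq, new_freq).
     */
    #include <linux/math64.h>
    #include <linux/types.h>

    static unsigned long scale_lpj(unsigned long ref_lpj,
                                   unsigned int ref_khz, unsigned int new_khz)
    {
            return (unsigned long)div_u64((u64)ref_lpj * new_khz, ref_khz);
    }
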
index c0e816468e300f242735f4825d09b9d291a9b522..0b19faf002eeb30d44c28e56b57a8a907814989d 100644 (file)
 #define PFX "powernow-k8: "
 #define VERSION "version 2.20.00"
 #include "powernow-k8.h"
-#include "mperf.h"
 
 /* serialize freq changes  */
 static DEFINE_MUTEX(fidvid_mutex);
 
 static DEFINE_PER_CPU(struct powernow_k8_data *, powernow_data);
 
-static int cpu_family = CPU_OPTERON;
-
-/* array to map SW pstate number to acpi state */
-static u32 ps_to_as[8];
-
-/* core performance boost */
-static bool cpb_capable, cpb_enabled;
-static struct msr __percpu *msrs;
-
 static struct cpufreq_driver cpufreq_amd64_driver;
 
 #ifndef CONFIG_SMP
@@ -86,12 +76,6 @@ static u32 find_khz_freq_from_fid(u32 fid)
        return 1000 * find_freq_from_fid(fid);
 }
 
-static u32 find_khz_freq_from_pstate(struct cpufreq_frequency_table *data,
-                                    u32 pstate)
-{
-       return data[ps_to_as[pstate]].frequency;
-}
-
 /* Return the vco fid for an input fid
  *
  * Each "low" fid has corresponding "high" fid, and you can get to "low" fids
@@ -114,9 +98,6 @@ static int pending_bit_stuck(void)
 {
        u32 lo, hi;
 
-       if (cpu_family == CPU_HW_PSTATE)
-               return 0;
-
        rdmsr(MSR_FIDVID_STATUS, lo, hi);
        return lo & MSR_S_LO_CHANGE_PENDING ? 1 : 0;
 }
@@ -130,20 +111,6 @@ static int query_current_values_with_pending_wait(struct powernow_k8_data *data)
        u32 lo, hi;
        u32 i = 0;
 
-       if (cpu_family == CPU_HW_PSTATE) {
-               rdmsr(MSR_PSTATE_STATUS, lo, hi);
-               i = lo & HW_PSTATE_MASK;
-               data->currpstate = i;
-
-               /*
-                * a workaround for family 11h erratum 311 might cause
-                * an "out-of-range Pstate if the core is in Pstate-0
-                */
-               if ((boot_cpu_data.x86 == 0x11) && (i >= data->numps))
-                       data->currpstate = HW_PSTATE_0;
-
-               return 0;
-       }
        do {
                if (i++ > 10000) {
                        pr_debug("detected change pending stuck\n");
@@ -300,14 +267,6 @@ static int decrease_vid_code_by_step(struct powernow_k8_data *data,
        return 0;
 }
 
-/* Change hardware pstate by single MSR write */
-static int transition_pstate(struct powernow_k8_data *data, u32 pstate)
-{
-       wrmsr(MSR_PSTATE_CTRL, pstate, 0);
-       data->currpstate = pstate;
-       return 0;
-}
-
 /* Change Opteron/Athlon64 fid and vid, by the 3 phases. */
 static int transition_fid_vid(struct powernow_k8_data *data,
                u32 reqfid, u32 reqvid)
@@ -524,8 +483,6 @@ static int core_voltage_post_transition(struct powernow_k8_data *data,
 static const struct x86_cpu_id powernow_k8_ids[] = {
        /* IO based frequency switching */
        { X86_VENDOR_AMD, 0xf },
-       /* MSR based frequency switching supported */
-       X86_FEATURE_MATCH(X86_FEATURE_HW_PSTATE),
        {}
 };
 MODULE_DEVICE_TABLE(x86cpu, powernow_k8_ids);
@@ -561,15 +518,8 @@ static void check_supported_cpu(void *_rc)
                                "Power state transitions not supported\n");
                        return;
                }
-       } else { /* must be a HW Pstate capable processor */
-               cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
-               if ((edx & USE_HW_PSTATE) == USE_HW_PSTATE)
-                       cpu_family = CPU_HW_PSTATE;
-               else
-                       return;
+               *rc = 0;
        }
-
-       *rc = 0;
 }
 
 static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst,
@@ -633,18 +583,11 @@ static void print_basics(struct powernow_k8_data *data)
        for (j = 0; j < data->numps; j++) {
                if (data->powernow_table[j].frequency !=
                                CPUFREQ_ENTRY_INVALID) {
-                       if (cpu_family == CPU_HW_PSTATE) {
-                               printk(KERN_INFO PFX
-                                       "   %d : pstate %d (%d MHz)\n", j,
-                                       data->powernow_table[j].index,
-                                       data->powernow_table[j].frequency/1000);
-                       } else {
                                printk(KERN_INFO PFX
                                        "fid 0x%x (%d MHz), vid 0x%x\n",
                                        data->powernow_table[j].index & 0xff,
                                        data->powernow_table[j].frequency/1000,
                                        data->powernow_table[j].index >> 8);
-                       }
                }
        }
        if (data->batps)
@@ -652,20 +595,6 @@ static void print_basics(struct powernow_k8_data *data)
                                data->batps);
 }
 
-static u32 freq_from_fid_did(u32 fid, u32 did)
-{
-       u32 mhz = 0;
-
-       if (boot_cpu_data.x86 == 0x10)
-               mhz = (100 * (fid + 0x10)) >> did;
-       else if (boot_cpu_data.x86 == 0x11)
-               mhz = (100 * (fid + 8)) >> did;
-       else
-               BUG();
-
-       return mhz * 1000;
-}
-
 static int fill_powernow_table(struct powernow_k8_data *data,
                struct pst_s *pst, u8 maxvid)
 {
@@ -825,7 +754,7 @@ static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data,
 {
        u64 control;
 
-       if (!data->acpi_data.state_count || (cpu_family == CPU_HW_PSTATE))
+       if (!data->acpi_data.state_count)
                return;
 
        control = data->acpi_data.states[index].control;
@@ -876,10 +805,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
        data->numps = data->acpi_data.state_count;
        powernow_k8_acpi_pst_values(data, 0);
 
-       if (cpu_family == CPU_HW_PSTATE)
-               ret_val = fill_powernow_table_pstate(data, powernow_table);
-       else
-               ret_val = fill_powernow_table_fidvid(data, powernow_table);
+       ret_val = fill_powernow_table_fidvid(data, powernow_table);
        if (ret_val)
                goto err_out_mem;
 
@@ -916,51 +842,6 @@ err_out:
        return ret_val;
 }
 
-static int fill_powernow_table_pstate(struct powernow_k8_data *data,
-               struct cpufreq_frequency_table *powernow_table)
-{
-       int i;
-       u32 hi = 0, lo = 0;
-       rdmsr(MSR_PSTATE_CUR_LIMIT, lo, hi);
-       data->max_hw_pstate = (lo & HW_PSTATE_MAX_MASK) >> HW_PSTATE_MAX_SHIFT;
-
-       for (i = 0; i < data->acpi_data.state_count; i++) {
-               u32 index;
-
-               index = data->acpi_data.states[i].control & HW_PSTATE_MASK;
-               if (index > data->max_hw_pstate) {
-                       printk(KERN_ERR PFX "invalid pstate %d - "
-                                       "bad value %d.\n", i, index);
-                       printk(KERN_ERR PFX "Please report to BIOS "
-                                       "manufacturer\n");
-                       invalidate_entry(powernow_table, i);
-                       continue;
-               }
-
-               ps_to_as[index] = i;
-
-               /* Frequency may be rounded for these */
-               if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10)
-                                || boot_cpu_data.x86 == 0x11) {
-
-                       rdmsr(MSR_PSTATE_DEF_BASE + index, lo, hi);
-                       if (!(hi & HW_PSTATE_VALID_MASK)) {
-                               pr_debug("invalid pstate %d, ignoring\n", index);
-                               invalidate_entry(powernow_table, i);
-                               continue;
-                       }
-
-                       powernow_table[i].frequency =
-                               freq_from_fid_did(lo & 0x3f, (lo >> 6) & 7);
-               } else
-                       powernow_table[i].frequency =
-                               data->acpi_data.states[i].core_frequency * 1000;
-
-               powernow_table[i].index = index;
-       }
-       return 0;
-}
-
 static int fill_powernow_table_fidvid(struct powernow_k8_data *data,
                struct cpufreq_frequency_table *powernow_table)
 {
@@ -1037,15 +918,7 @@ static int get_transition_latency(struct powernow_k8_data *data)
                        max_latency = cur_latency;
        }
        if (max_latency == 0) {
-               /*
-                * Fam 11h and later may return 0 as transition latency. This
-                * is intended and means "very fast". While cpufreq core and
-                * governors currently can handle that gracefully, better set it
-                * to 1 to avoid problems in the future.
-                */
-               if (boot_cpu_data.x86 < 0x11)
-                       printk(KERN_ERR FW_WARN PFX "Invalid zero transition "
-                               "latency\n");
+               pr_err(FW_WARN PFX "Invalid zero transition latency\n");
                max_latency = 1;
        }
        /* value in usecs, needs to be in nanoseconds */
@@ -1105,40 +978,6 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data,
        return res;
 }
 
-/* Take a frequency, and issue the hardware pstate transition command */
-static int transition_frequency_pstate(struct powernow_k8_data *data,
-               unsigned int index)
-{
-       u32 pstate = 0;
-       int res, i;
-       struct cpufreq_freqs freqs;
-
-       pr_debug("cpu %d transition to index %u\n", smp_processor_id(), index);
-
-       /* get MSR index for hardware pstate transition */
-       pstate = index & HW_PSTATE_MASK;
-       if (pstate > data->max_hw_pstate)
-               return -EINVAL;
-
-       freqs.old = find_khz_freq_from_pstate(data->powernow_table,
-                       data->currpstate);
-       freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);
-
-       for_each_cpu(i, data->available_cores) {
-               freqs.cpu = i;
-               cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
-       }
-
-       res = transition_pstate(data, pstate);
-       freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);
-
-       for_each_cpu(i, data->available_cores) {
-               freqs.cpu = i;
-               cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
-       }
-       return res;
-}
-
 /* Driver entry point to switch to the target frequency */
 static int powernowk8_target(struct cpufreq_policy *pol,
                unsigned targfreq, unsigned relation)
@@ -1180,18 +1019,15 @@ static int powernowk8_target(struct cpufreq_policy *pol,
        if (query_current_values_with_pending_wait(data))
                goto err_out;
 
-       if (cpu_family != CPU_HW_PSTATE) {
-               pr_debug("targ: curr fid 0x%x, vid 0x%x\n",
-               data->currfid, data->currvid);
+       pr_debug("targ: curr fid 0x%x, vid 0x%x\n",
+                data->currfid, data->currvid);
 
-               if ((checkvid != data->currvid) ||
-                   (checkfid != data->currfid)) {
-                       printk(KERN_INFO PFX
-                               "error - out of sync, fix 0x%x 0x%x, "
-                               "vid 0x%x 0x%x\n",
-                               checkfid, data->currfid,
-                               checkvid, data->currvid);
-               }
+       if ((checkvid != data->currvid) ||
+           (checkfid != data->currfid)) {
+               pr_info(PFX
+                      "error - out of sync, fix 0x%x 0x%x, vid 0x%x 0x%x\n",
+                      checkfid, data->currfid,
+                      checkvid, data->currvid);
        }
 
        if (cpufreq_frequency_table_target(pol, data->powernow_table,
@@ -1202,11 +1038,8 @@ static int powernowk8_target(struct cpufreq_policy *pol,
 
        powernow_k8_acpi_pst_values(data, newstate);
 
-       if (cpu_family == CPU_HW_PSTATE)
-               ret = transition_frequency_pstate(data,
-                       data->powernow_table[newstate].index);
-       else
-               ret = transition_frequency_fidvid(data, newstate);
+       ret = transition_frequency_fidvid(data, newstate);
+
        if (ret) {
                printk(KERN_ERR PFX "transition frequency failed\n");
                ret = 1;
@@ -1215,11 +1048,7 @@ static int powernowk8_target(struct cpufreq_policy *pol,
        }
        mutex_unlock(&fidvid_mutex);
 
-       if (cpu_family == CPU_HW_PSTATE)
-               pol->cur = find_khz_freq_from_pstate(data->powernow_table,
-                               data->powernow_table[newstate].index);
-       else
-               pol->cur = find_khz_freq_from_fid(data->currfid);
+       pol->cur = find_khz_freq_from_fid(data->currfid);
        ret = 0;
 
 err_out:
@@ -1259,22 +1088,23 @@ static void __cpuinit powernowk8_cpu_init_on_cpu(void *_init_on_cpu)
                return;
        }
 
-       if (cpu_family == CPU_OPTERON)
-               fidvid_msr_init();
+       fidvid_msr_init();
 
        init_on_cpu->rc = 0;
 }
 
+static const char missing_pss_msg[] =
+       KERN_ERR
+       FW_BUG PFX "No compatible ACPI _PSS objects found.\n"
+       FW_BUG PFX "First, make sure Cool'N'Quiet is enabled in the BIOS.\n"
+       FW_BUG PFX "If that doesn't help, try upgrading your BIOS.\n";
+
 /* per CPU init entry point to the driver */
 static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 {
-       static const char ACPI_PSS_BIOS_BUG_MSG[] =
-               KERN_ERR FW_BUG PFX "No compatible ACPI _PSS objects found.\n"
-               FW_BUG PFX "Try again with latest BIOS.\n";
        struct powernow_k8_data *data;
        struct init_on_cpu init_on_cpu;
        int rc;
-       struct cpuinfo_x86 *c = &cpu_data(pol->cpu);
 
        if (!cpu_online(pol->cpu))
                return -ENODEV;
@@ -1290,7 +1120,6 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
        }
 
        data->cpu = pol->cpu;
-       data->currpstate = HW_PSTATE_INVALID;
 
        if (powernow_k8_cpu_init_acpi(data)) {
                /*
@@ -1298,7 +1127,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
                 * an UP version, and is deprecated by AMD.
                 */
                if (num_online_cpus() != 1) {
-                       printk_once(ACPI_PSS_BIOS_BUG_MSG);
+                       printk_once(missing_pss_msg);
                        goto err_out;
                }
                if (pol->cpu != 0) {
@@ -1327,17 +1156,10 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
        if (rc != 0)
                goto err_out_exit_acpi;
 
-       if (cpu_family == CPU_HW_PSTATE)
-               cpumask_copy(pol->cpus, cpumask_of(pol->cpu));
-       else
-               cpumask_copy(pol->cpus, cpu_core_mask(pol->cpu));
+       cpumask_copy(pol->cpus, cpu_core_mask(pol->cpu));
        data->available_cores = pol->cpus;
 
-       if (cpu_family == CPU_HW_PSTATE)
-               pol->cur = find_khz_freq_from_pstate(data->powernow_table,
-                               data->currpstate);
-       else
-               pol->cur = find_khz_freq_from_fid(data->currfid);
+       pol->cur = find_khz_freq_from_fid(data->currfid);
        pr_debug("policy current frequency %d kHz\n", pol->cur);
 
        /* min/max the cpu is capable of */
@@ -1349,18 +1171,10 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
                return -EINVAL;
        }
 
-       /* Check for APERF/MPERF support in hardware */
-       if (cpu_has(c, X86_FEATURE_APERFMPERF))
-               cpufreq_amd64_driver.getavg = cpufreq_get_measured_perf;
-
        cpufreq_frequency_table_get_attr(data->powernow_table, pol->cpu);
 
-       if (cpu_family == CPU_HW_PSTATE)
-               pr_debug("cpu_init done, current pstate 0x%x\n",
-                               data->currpstate);
-       else
-               pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n",
-                       data->currfid, data->currvid);
+       pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n",
+                data->currfid, data->currvid);
 
        per_cpu(powernow_data, pol->cpu) = data;
 
@@ -1413,88 +1227,15 @@ static unsigned int powernowk8_get(unsigned int cpu)
        if (err)
                goto out;
 
-       if (cpu_family == CPU_HW_PSTATE)
-               khz = find_khz_freq_from_pstate(data->powernow_table,
-                                               data->currpstate);
-       else
-               khz = find_khz_freq_from_fid(data->currfid);
+       khz = find_khz_freq_from_fid(data->currfid);
 
 
 out:
        return khz;
 }
 
-static void _cpb_toggle_msrs(bool t)
-{
-       int cpu;
-
-       get_online_cpus();
-
-       rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);
-
-       for_each_cpu(cpu, cpu_online_mask) {
-               struct msr *reg = per_cpu_ptr(msrs, cpu);
-               if (t)
-                       reg->l &= ~BIT(25);
-               else
-                       reg->l |= BIT(25);
-       }
-       wrmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);
-
-       put_online_cpus();
-}
-
-/*
- * Switch on/off core performance boosting.
- *
- * 0=disable
- * 1=enable.
- */
-static void cpb_toggle(bool t)
-{
-       if (!cpb_capable)
-               return;
-
-       if (t && !cpb_enabled) {
-               cpb_enabled = true;
-               _cpb_toggle_msrs(t);
-               printk(KERN_INFO PFX "Core Boosting enabled.\n");
-       } else if (!t && cpb_enabled) {
-               cpb_enabled = false;
-               _cpb_toggle_msrs(t);
-               printk(KERN_INFO PFX "Core Boosting disabled.\n");
-       }
-}
-
-static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf,
-                                size_t count)
-{
-       int ret = -EINVAL;
-       unsigned long val = 0;
-
-       ret = strict_strtoul(buf, 10, &val);
-       if (!ret && (val == 0 || val == 1) && cpb_capable)
-               cpb_toggle(val);
-       else
-               return -EINVAL;
-
-       return count;
-}
-
-static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf)
-{
-       return sprintf(buf, "%u\n", cpb_enabled);
-}
-
-#define define_one_rw(_name) \
-static struct freq_attr _name = \
-__ATTR(_name, 0644, show_##_name, store_##_name)
-
-define_one_rw(cpb);
-
 static struct freq_attr *powernow_k8_attr[] = {
        &cpufreq_freq_attr_scaling_available_freqs,
-       &cpb,
        NULL,
 };
 
@@ -1510,53 +1251,18 @@ static struct cpufreq_driver cpufreq_amd64_driver = {
        .attr           = powernow_k8_attr,
 };
 
-/*
- * Clear the boost-disable flag on the CPU_DOWN path so that this cpu
- * cannot block the remaining ones from boosting. On the CPU_UP path we
- * simply keep the boost-disable flag in sync with the current global
- * state.
- */
-static int cpb_notify(struct notifier_block *nb, unsigned long action,
-                     void *hcpu)
-{
-       unsigned cpu = (long)hcpu;
-       u32 lo, hi;
-
-       switch (action) {
-       case CPU_UP_PREPARE:
-       case CPU_UP_PREPARE_FROZEN:
-
-               if (!cpb_enabled) {
-                       rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
-                       lo |= BIT(25);
-                       wrmsr_on_cpu(cpu, MSR_K7_HWCR, lo, hi);
-               }
-               break;
-
-       case CPU_DOWN_PREPARE:
-       case CPU_DOWN_PREPARE_FROZEN:
-               rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
-               lo &= ~BIT(25);
-               wrmsr_on_cpu(cpu, MSR_K7_HWCR, lo, hi);
-               break;
-
-       default:
-               break;
-       }
-
-       return NOTIFY_OK;
-}
-
-static struct notifier_block cpb_nb = {
-       .notifier_call          = cpb_notify,
-};
-
 /* driver entry point for init */
 static int __cpuinit powernowk8_init(void)
 {
-       unsigned int i, supported_cpus = 0, cpu;
+       unsigned int i, supported_cpus = 0;
        int rv;
 
+       if (static_cpu_has(X86_FEATURE_HW_PSTATE)) {
+               pr_warn(PFX "this CPU is not supported anymore, using acpi-cpufreq instead.\n");
+               request_module("acpi-cpufreq");
+               return -ENODEV;
+       }
+
        if (!x86_match_cpu(powernow_k8_ids))
                return -ENODEV;
 
@@ -1570,38 +1276,13 @@ static int __cpuinit powernowk8_init(void)
        if (supported_cpus != num_online_cpus())
                return -ENODEV;
 
-       printk(KERN_INFO PFX "Found %d %s (%d cpu cores) (" VERSION ")\n",
-               num_online_nodes(), boot_cpu_data.x86_model_id, supported_cpus);
-
-       if (boot_cpu_has(X86_FEATURE_CPB)) {
-
-               cpb_capable = true;
-
-               msrs = msrs_alloc();
-               if (!msrs) {
-                       printk(KERN_ERR "%s: Error allocating msrs!\n", __func__);
-                       return -ENOMEM;
-               }
-
-               register_cpu_notifier(&cpb_nb);
-
-               rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);
+       rv = cpufreq_register_driver(&cpufreq_amd64_driver);
 
-               for_each_cpu(cpu, cpu_online_mask) {
-                       struct msr *reg = per_cpu_ptr(msrs, cpu);
-                       cpb_enabled |= !(!!(reg->l & BIT(25)));
-               }
+       if (!rv)
+               pr_info(PFX "Found %d %s (%d cpu cores) (" VERSION ")\n",
+                       num_online_nodes(), boot_cpu_data.x86_model_id,
+                       supported_cpus);
 
-               printk(KERN_INFO PFX "Core Performance Boosting: %s.\n",
-                       (cpb_enabled ? "on" : "off"));
-       }
-
-       rv = cpufreq_register_driver(&cpufreq_amd64_driver);
-       if (rv < 0 && boot_cpu_has(X86_FEATURE_CPB)) {
-               unregister_cpu_notifier(&cpb_nb);
-               msrs_free(msrs);
-               msrs = NULL;
-       }
        return rv;
 }
 
@@ -1610,13 +1291,6 @@ static void __exit powernowk8_exit(void)
 {
        pr_debug("exit\n");
 
-       if (boot_cpu_has(X86_FEATURE_CPB)) {
-               msrs_free(msrs);
-               msrs = NULL;
-
-               unregister_cpu_notifier(&cpb_nb);
-       }
-
        cpufreq_unregister_driver(&cpufreq_amd64_driver);
 }
 
index 3744d26cdc2b31524fe6686e33a28f2ada18f30b..79329d4d5abea9d9d7ff537bf636de9296cddcfe 100644 (file)
@@ -5,24 +5,11 @@
  *  http://www.gnu.org/licenses/gpl.html
  */
 
-enum pstate {
-       HW_PSTATE_INVALID = 0xff,
-       HW_PSTATE_0 = 0,
-       HW_PSTATE_1 = 1,
-       HW_PSTATE_2 = 2,
-       HW_PSTATE_3 = 3,
-       HW_PSTATE_4 = 4,
-       HW_PSTATE_5 = 5,
-       HW_PSTATE_6 = 6,
-       HW_PSTATE_7 = 7,
-};
-
 struct powernow_k8_data {
        unsigned int cpu;
 
        u32 numps;  /* number of p-states */
        u32 batps;  /* number of p-states supported on battery */
-       u32 max_hw_pstate; /* maximum legal hardware pstate */
 
        /* these values are constant when the PSB is used to determine
         * vid/fid pairings, but are modified during the ->target() call
@@ -37,7 +24,6 @@ struct powernow_k8_data {
        /* keep track of the current fid / vid or pstate */
        u32 currvid;
        u32 currfid;
-       enum pstate currpstate;
 
        /* the powernow_table includes all frequency and vid/fid pairings:
         * fid are the lower 8 bits of the index, vid are the upper 8 bits.
@@ -97,23 +83,6 @@ struct powernow_k8_data {
 #define MSR_S_HI_CURRENT_VID      0x0000003f
 #define MSR_C_HI_STP_GNT_BENIGN          0x00000001
 
-
-/* Hardware Pstate _PSS and MSR definitions */
-#define USE_HW_PSTATE          0x00000080
-#define HW_PSTATE_MASK                 0x00000007
-#define HW_PSTATE_VALID_MASK   0x80000000
-#define HW_PSTATE_MAX_MASK     0x000000f0
-#define HW_PSTATE_MAX_SHIFT    4
-#define MSR_PSTATE_DEF_BASE    0xc0010064 /* base of Pstate MSRs */
-#define MSR_PSTATE_STATUS      0xc0010063 /* Pstate Status MSR */
-#define MSR_PSTATE_CTRL        0xc0010062 /* Pstate control MSR */
-#define MSR_PSTATE_CUR_LIMIT   0xc0010061 /* pstate current limit MSR */
-
-/* define the two driver architectures */
-#define CPU_OPTERON 0
-#define CPU_HW_PSTATE 1
-
-
 /*
  * There are restrictions frequencies have to follow:
  * - only 1 entry in the low fid table ( <=1.4GHz )
@@ -218,5 +187,4 @@ static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid);
 
 static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index);
 
-static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table);
 static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table);
index 58bf3b1ac9c46035c2a3fae79271954f6d436442..87db3877fead7280d93eaf53baabcc6e4fe0f043 100644 (file)
@@ -18,9 +18,10 @@ static struct cpuidle_driver *cpuidle_curr_driver;
 DEFINE_SPINLOCK(cpuidle_driver_lock);
 int cpuidle_driver_refcount;
 
-static void __cpuidle_register_driver(struct cpuidle_driver *drv)
+static void set_power_states(struct cpuidle_driver *drv)
 {
        int i;
+
        /*
         * cpuidle driver should set the drv->power_specified bit
         * before registering if the driver provides
@@ -35,13 +36,10 @@ static void __cpuidle_register_driver(struct cpuidle_driver *drv)
         * a power value of -1.  So we use -2, -3, etc., for other
         * c-states.
         */
-       if (!drv->power_specified) {
-               for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++)
-                       drv->states[i].power_usage = -1 - i;
-       }
+       for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++)
+               drv->states[i].power_usage = -1 - i;
 }
 
-
 /**
  * cpuidle_register_driver - registers a driver
  * @drv: the driver
@@ -59,13 +57,16 @@ int cpuidle_register_driver(struct cpuidle_driver *drv)
                spin_unlock(&cpuidle_driver_lock);
                return -EBUSY;
        }
-       __cpuidle_register_driver(drv);
+
+       if (!drv->power_specified)
+               set_power_states(drv);
+
        cpuidle_curr_driver = drv;
+
        spin_unlock(&cpuidle_driver_lock);
 
        return 0;
 }
-
 EXPORT_SYMBOL_GPL(cpuidle_register_driver);
 
 /**
@@ -96,7 +97,6 @@ void cpuidle_unregister_driver(struct cpuidle_driver *drv)
 
        spin_unlock(&cpuidle_driver_lock);
 }
-
 EXPORT_SYMBOL_GPL(cpuidle_unregister_driver);
 
 struct cpuidle_driver *cpuidle_driver_ref(void)
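
The hunk above factors the default power-value assignment into set_power_states() and moves the drv->power_specified check to the caller. The assigned values only need to be distinct and decreasing so deeper C-states look cheaper to the governor; a short worked example follows (a sketch, not part of the patch, assuming CPUIDLE_DRIVER_STATE_START is 1 and a four-state driver):

    /*
     * Sketch: with CPUIDLE_DRIVER_STATE_START == 1 and state_count == 4,
     * set_power_states() assigns
     *
     *   states[1].power_usage = -1 - 1 = -2
     *   states[2].power_usage = -1 - 2 = -3
     *   states[3].power_usage = -1 - 3 = -4
     *
     * so a governor that picks the lowest power_usage meeting the latency
     * constraint will prefer the deepest eligible state.
     */
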
index b6a09ea859b135c31e6dcffbad0e7a3d2ad66eee..9b784051ec12b47564b3d07429096556b2fc2035 100644 (file)
@@ -88,6 +88,8 @@ static int ladder_select_state(struct cpuidle_driver *drv,
 
        /* consider promotion */
        if (last_idx < drv->state_count - 1 &&
+           !drv->states[last_idx + 1].disabled &&
+           !dev->states_usage[last_idx + 1].disable &&
            last_residency > last_state->threshold.promotion_time &&
            drv->states[last_idx + 1].exit_latency <= latency_req) {
                last_state->stats.promotion_count++;
@@ -100,7 +102,9 @@ static int ladder_select_state(struct cpuidle_driver *drv,
 
        /* consider demotion */
        if (last_idx > CPUIDLE_DRIVER_STATE_START &&
-           drv->states[last_idx].exit_latency > latency_req) {
+           (drv->states[last_idx].disabled ||
+           dev->states_usage[last_idx].disable ||
+           drv->states[last_idx].exit_latency > latency_req)) {
                int i;
 
                for (i = last_idx - 1; i > CPUIDLE_DRIVER_STATE_START; i--) {
index 002888185f170e92fa798233a40c64ef2258d42c..d216cd3cc569ecdbf195f45b055a5489470f0b70 100644 (file)
@@ -120,3 +120,4 @@ u32 gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
 
        return ret;
 }
+EXPORT_SYMBOL(gen_split_key);
index 920a609b2c35857c2fa64c8341ad6561fe6f7d65..38f9e52f358b17596b8b41177d6af493d509f84b 100644 (file)
@@ -669,13 +669,18 @@ static int __devinit max77693_muic_probe(struct platform_device *pdev)
        }
        info->dev = &pdev->dev;
        info->max77693 = max77693;
-       info->max77693->regmap_muic = regmap_init_i2c(info->max77693->muic,
-                                        &max77693_muic_regmap_config);
-       if (IS_ERR(info->max77693->regmap_muic)) {
-               ret = PTR_ERR(info->max77693->regmap_muic);
-               dev_err(max77693->dev,
-                       "failed to allocate register map: %d\n", ret);
-               goto err_regmap;
+       if (info->max77693->regmap_muic)
+               dev_dbg(&pdev->dev, "allocate register map\n");
+       else {
+               info->max77693->regmap_muic = devm_regmap_init_i2c(
+                                               info->max77693->muic,
+                                               &max77693_muic_regmap_config);
+               if (IS_ERR(info->max77693->regmap_muic)) {
+                       ret = PTR_ERR(info->max77693->regmap_muic);
+                       dev_err(max77693->dev,
+                               "failed to allocate register map: %d\n", ret);
+                       goto err_regmap;
+               }
        }
        platform_set_drvdata(pdev, info);
        mutex_init(&info->mutex);
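
The probe change above reuses a MUIC register map that may already have been created by the MFD core and only builds one when it is missing, switching to devm_regmap_init_i2c() so the map is released automatically when the driver detaches. A minimal sketch of the pattern (hypothetical chip/foo_regmap_config names, not the driver's actual fields):

    /* Create the register map only if the parent MFD driver did not. */
    if (!chip->regmap_muic) {
            chip->regmap_muic = devm_regmap_init_i2c(chip->muic,
                                                     &foo_regmap_config);
            if (IS_ERR(chip->regmap_muic))
                    return PTR_ERR(chip->regmap_muic);
            /* no regmap_exit() needed in remove(): devres handles it */
    }
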
index b16c8a72a2e241a8a47c524564a02c48a5389b35..ba7926f5c0996ea78f571ab5e87de2a3aa2c490e 100644 (file)
@@ -294,7 +294,7 @@ config GPIO_MAX732X_IRQ
 
 config GPIO_MC9S08DZ60
        bool "MX35 3DS BOARD MC9S08DZ60 GPIO functions"
-       depends on I2C && MACH_MX35_3DS
+       depends on I2C=y && MACH_MX35_3DS
        help
          Select this to enable the MC9S08DZ60 GPIO driver
 
index ae37181798b3c979d9ce423a437f7ba09511166a..ec48ed5126284d4f41303a5ac4e654f856ca4b7a 100644 (file)
@@ -247,9 +247,9 @@ static int __devinit em_gio_irq_domain_init(struct em_gio_priv *p)
 
        p->irq_base = irq_alloc_descs(pdata->irq_base, 0,
                                      pdata->number_of_pins, numa_node_id());
-       if (IS_ERR_VALUE(p->irq_base)) {
+       if (p->irq_base < 0) {
                dev_err(&pdev->dev, "cannot get irq_desc\n");
-               return -ENXIO;
+               return p->irq_base;
        }
        pr_debug("gio: hw base = %d, nr = %d, sw base = %d\n",
                 pdata->gpio_base, pdata->number_of_pins, p->irq_base);
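
irq_alloc_descs() returns the base of the newly allocated descriptor range on success and a negative errno on failure, so the natural check is simply "< 0", and the callee's error code should be passed up rather than replaced with a blanket -ENXIO. A minimal sketch of the idiom (hypothetical names):

    irq_base = irq_alloc_descs(irq_start, 0, nr_pins, numa_node_id());
    if (irq_base < 0) {
            dev_err(dev, "cannot allocate irq descriptors\n");
            return irq_base;        /* propagate the real errno */
    }
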
index e97016af64434e292db5025240295fa3d1859dd6..b62d443e9a59125518289229b0a90c6d58503363 100644 (file)
@@ -170,6 +170,7 @@ static int __devinit rdc321x_gpio_probe(struct platform_device *pdev)
        rdc321x_gpio_dev->reg2_data_base = r->start + 0x4;
 
        rdc321x_gpio_dev->chip.label = "rdc321x-gpio";
+       rdc321x_gpio_dev->chip.owner = THIS_MODULE;
        rdc321x_gpio_dev->chip.direction_input = rdc_gpio_direction_input;
        rdc321x_gpio_dev->chip.direction_output = rdc_gpio_config;
        rdc321x_gpio_dev->chip.get = rdc_gpio_get_value;
index a18c4aa68b1e8dcfadd5340e3f076bf33ecb788f..f1a45997aea8c3255aa7ecb697c72abe0beaf6d0 100644 (file)
@@ -82,7 +82,7 @@ int of_get_named_gpio_flags(struct device_node *np, const char *propname,
        gpiochip_find(&gg_data, of_gpiochip_find_and_xlate);
 
        of_node_put(gg_data.gpiospec.np);
-       pr_debug("%s exited with status %d\n", __func__, ret);
+       pr_debug("%s exited with status %d\n", __func__, gg_data.out_gpio);
        return gg_data.out_gpio;
 }
 EXPORT_SYMBOL(of_get_named_gpio_flags);
index d0c4574ef49c1d60b074643b278580a72ff8dc69..36164806b9d475aabff5165f47d481c8a93ce644 100644 (file)
@@ -193,6 +193,9 @@ static const struct file_operations ast_fops = {
        .mmap = ast_mmap,
        .poll = drm_poll,
        .fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl = drm_compat_ioctl,
+#endif
        .read = drm_read,
 };
 
index 7282c081fb53000397346d6c34879c06f940a4bd..a712cafcfa1dfde6f76e5c94a77307b7c1f1041a 100644 (file)
@@ -841,7 +841,7 @@ int ast_cursor_init(struct drm_device *dev)
 
        ast->cursor_cache = obj;
        ast->cursor_cache_gpu_addr = gpu_addr;
-       DRM_ERROR("pinned cursor cache at %llx\n", ast->cursor_cache_gpu_addr);
+       DRM_DEBUG_KMS("pinned cursor cache at %llx\n", ast->cursor_cache_gpu_addr);
        return 0;
 fail:
        return ret;
index 7053140c65969758f9f22ded7cea130fe0ab7bd5..b83a2d7ddd1ae8ec04ae0df1b1e647fbc1e327af 100644 (file)
@@ -74,6 +74,9 @@ static const struct file_operations cirrus_driver_fops = {
        .unlocked_ioctl = drm_ioctl,
        .mmap = cirrus_mmap,
        .poll = drm_poll,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl = drm_compat_ioctl,
+#endif
        .fasync = drm_fasync,
 };
 static struct drm_driver driver = {
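
The same one-line addition appears across several DRM drivers in this series (ast, cirrus, i810, mgag200, savage, sis, tdfx, udl, via): wiring .compat_ioctl to the generic drm_compat_ioctl() lets 32-bit userspace issue ioctls on a 64-bit kernel. A minimal sketch of the recurring hunk (hypothetical driver name):

    static const struct file_operations foo_driver_fops = {
            .owner          = THIS_MODULE,
            .unlocked_ioctl = drm_ioctl,
    #ifdef CONFIG_COMPAT
            .compat_ioctl   = drm_compat_ioctl,     /* 32-bit ioctl translation */
    #endif
            .llseek         = noop_llseek,
    };
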
index 7f5096763b7d387c25713d7e9ee95e17ce90f7d6..59a26e577b57f423077d5a2c86d75364f14b6838 100644 (file)
@@ -36,6 +36,6 @@ config DRM_EXYNOS_VIDI
 
 config DRM_EXYNOS_G2D
        bool "Exynos DRM G2D"
-       depends on DRM_EXYNOS
+       depends on DRM_EXYNOS && !VIDEO_SAMSUNG_S5P_G2D
        help
          Choose this option if you want to use Exynos G2D for DRM.
index 613bf8a5d9b268331779b0f60f43f52a047269de..ae13febe0eaa64e821adf2f5db306e5ebdd07186 100644 (file)
@@ -163,6 +163,12 @@ static void exynos_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
        /* TODO */
 }
 
+static int exynos_gem_dmabuf_mmap(struct dma_buf *dma_buf,
+       struct vm_area_struct *vma)
+{
+       return -ENOTTY;
+}
+
 static struct dma_buf_ops exynos_dmabuf_ops = {
        .map_dma_buf            = exynos_gem_map_dma_buf,
        .unmap_dma_buf          = exynos_gem_unmap_dma_buf,
@@ -170,6 +176,7 @@ static struct dma_buf_ops exynos_dmabuf_ops = {
        .kmap_atomic            = exynos_gem_dmabuf_kmap_atomic,
        .kunmap                 = exynos_gem_dmabuf_kunmap,
        .kunmap_atomic          = exynos_gem_dmabuf_kunmap_atomic,
+       .mmap                   = exynos_gem_dmabuf_mmap,
        .release                = exynos_dmabuf_release,
 };
 
index ebacec6f1e48efef646700c630f02813007eaac0..d07071937453302fc74793c4aecb48a086b8069f 100644 (file)
@@ -160,7 +160,6 @@ static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
        if (!file_priv)
                return -ENOMEM;
 
-       drm_prime_init_file_private(&file->prime);
        file->driver_priv = file_priv;
 
        return exynos_drm_subdrv_open(dev, file);
@@ -184,7 +183,6 @@ static void exynos_drm_preclose(struct drm_device *dev,
                        e->base.destroy(&e->base);
                }
        }
-       drm_prime_destroy_file_private(&file->prime);
        spin_unlock_irqrestore(&dev->event_lock, flags);
 
        exynos_drm_subdrv_close(dev, file);
@@ -241,6 +239,9 @@ static const struct file_operations exynos_drm_driver_fops = {
        .poll           = drm_poll,
        .read           = drm_read,
        .unlocked_ioctl = drm_ioctl,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl = drm_compat_ioctl,
+#endif
        .release        = drm_release,
 };
 
index a68d2b313f03cab1d42fae0d362159f6a4f1ba26..b19cd93e70472b325224611d5725614c33f72f83 100644 (file)
@@ -831,11 +831,6 @@ static int __devinit fimd_probe(struct platform_device *pdev)
        }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(dev, "failed to find registers\n");
-               ret = -ENOENT;
-               goto err_clk;
-       }
 
        ctx->regs = devm_request_and_ioremap(&pdev->dev, res);
        if (!ctx->regs) {
index d2d88f22a037a8ace36fae877fd45dbf60357b6c..1065e90d09199414585adca93e2e586b9fe215f0 100644 (file)
@@ -129,7 +129,6 @@ struct g2d_runqueue_node {
 struct g2d_data {
        struct device                   *dev;
        struct clk                      *gate_clk;
-       struct resource                 *regs_res;
        void __iomem                    *regs;
        int                             irq;
        struct workqueue_struct         *g2d_workq;
@@ -751,7 +750,7 @@ static int __devinit g2d_probe(struct platform_device *pdev)
        struct exynos_drm_subdrv *subdrv;
        int ret;
 
-       g2d = kzalloc(sizeof(*g2d), GFP_KERNEL);
+       g2d = devm_kzalloc(&pdev->dev, sizeof(*g2d), GFP_KERNEL);
        if (!g2d) {
                dev_err(dev, "failed to allocate driver data\n");
                return -ENOMEM;
@@ -759,10 +758,8 @@ static int __devinit g2d_probe(struct platform_device *pdev)
 
        g2d->runqueue_slab = kmem_cache_create("g2d_runqueue_slab",
                        sizeof(struct g2d_runqueue_node), 0, 0, NULL);
-       if (!g2d->runqueue_slab) {
-               ret = -ENOMEM;
-               goto err_free_mem;
-       }
+       if (!g2d->runqueue_slab)
+               return -ENOMEM;
 
        g2d->dev = dev;
 
@@ -794,38 +791,26 @@ static int __devinit g2d_probe(struct platform_device *pdev)
        pm_runtime_enable(dev);
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(dev, "failed to get I/O memory\n");
-               ret = -ENOENT;
-               goto err_put_clk;
-       }
 
-       g2d->regs_res = request_mem_region(res->start, resource_size(res),
-                                          dev_name(dev));
-       if (!g2d->regs_res) {
-               dev_err(dev, "failed to request I/O memory\n");
-               ret = -ENOENT;
-               goto err_put_clk;
-       }
-
-       g2d->regs = ioremap(res->start, resource_size(res));
+       g2d->regs = devm_request_and_ioremap(&pdev->dev, res);
        if (!g2d->regs) {
                dev_err(dev, "failed to remap I/O memory\n");
                ret = -ENXIO;
-               goto err_release_res;
+               goto err_put_clk;
        }
 
        g2d->irq = platform_get_irq(pdev, 0);
        if (g2d->irq < 0) {
                dev_err(dev, "failed to get irq\n");
                ret = g2d->irq;
-               goto err_unmap_base;
+               goto err_put_clk;
        }
 
-       ret = request_irq(g2d->irq, g2d_irq_handler, 0, "drm_g2d", g2d);
+       ret = devm_request_irq(&pdev->dev, g2d->irq, g2d_irq_handler, 0,
+                                                               "drm_g2d", g2d);
        if (ret < 0) {
                dev_err(dev, "irq request failed\n");
-               goto err_unmap_base;
+               goto err_put_clk;
        }
 
        platform_set_drvdata(pdev, g2d);
@@ -838,7 +823,7 @@ static int __devinit g2d_probe(struct platform_device *pdev)
        ret = exynos_drm_subdrv_register(subdrv);
        if (ret < 0) {
                dev_err(dev, "failed to register drm g2d device\n");
-               goto err_free_irq;
+               goto err_put_clk;
        }
 
        dev_info(dev, "The exynos g2d(ver %d.%d) successfully probed\n",
@@ -846,13 +831,6 @@ static int __devinit g2d_probe(struct platform_device *pdev)
 
        return 0;
 
-err_free_irq:
-       free_irq(g2d->irq, g2d);
-err_unmap_base:
-       iounmap(g2d->regs);
-err_release_res:
-       release_resource(g2d->regs_res);
-       kfree(g2d->regs_res);
 err_put_clk:
        pm_runtime_disable(dev);
        clk_put(g2d->gate_clk);
@@ -862,8 +840,6 @@ err_destroy_workqueue:
        destroy_workqueue(g2d->g2d_workq);
 err_destroy_slab:
        kmem_cache_destroy(g2d->runqueue_slab);
-err_free_mem:
-       kfree(g2d);
        return ret;
 }
 
@@ -873,24 +849,18 @@ static int __devexit g2d_remove(struct platform_device *pdev)
 
        cancel_work_sync(&g2d->runqueue_work);
        exynos_drm_subdrv_unregister(&g2d->subdrv);
-       free_irq(g2d->irq, g2d);
 
        while (g2d->runqueue_node) {
                g2d_free_runqueue_node(g2d, g2d->runqueue_node);
                g2d->runqueue_node = g2d_get_runqueue_node(g2d);
        }
 
-       iounmap(g2d->regs);
-       release_resource(g2d->regs_res);
-       kfree(g2d->regs_res);
-
        pm_runtime_disable(&pdev->dev);
        clk_put(g2d->gate_clk);
 
        g2d_fini_cmdlist(g2d);
        destroy_workqueue(g2d->g2d_workq);
        kmem_cache_destroy(g2d->runqueue_slab);
-       kfree(g2d);
 
        return 0;
 }
@@ -924,7 +894,7 @@ static int g2d_resume(struct device *dev)
 }
 #endif
 
-SIMPLE_DEV_PM_OPS(g2d_pm_ops, g2d_suspend, g2d_resume);
+static SIMPLE_DEV_PM_OPS(g2d_pm_ops, g2d_suspend, g2d_resume);
 
 struct platform_driver g2d_driver = {
        .probe          = g2d_probe,
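
The g2d probe conversion above replaces kzalloc()/request_mem_region()/ioremap()/request_irq() with their devm_* counterparts, which is why the err_free_irq/err_unmap_base/err_release_res/err_free_mem labels and the matching frees in g2d_remove() disappear: devres undoes everything when probe fails or the device is unbound. A minimal sketch of the resulting probe shape (hypothetical names, not the g2d code; devm_request_and_ioremap() was the helper of this era and tolerates a NULL resource):

    static int foo_probe(struct platform_device *pdev)
    {
            struct resource *res;
            struct foo_priv *priv;
            int ret;

            priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
            if (!priv)
                    return -ENOMEM;

            res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            priv->regs = devm_request_and_ioremap(&pdev->dev, res);
            if (!priv->regs)
                    return -ENXIO;

            priv->irq = platform_get_irq(pdev, 0);
            if (priv->irq < 0)
                    return priv->irq;

            ret = devm_request_irq(&pdev->dev, priv->irq, foo_irq_handler, 0,
                                   dev_name(&pdev->dev), priv);
            if (ret)
                    return ret;

            platform_set_drvdata(pdev, priv);
            return 0;       /* no unwind labels: devres releases it all */
    }
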
index f9efde40c097b819af24d41ff32162f3191ea632..a38051c95ec4384176ddc5f8bd8937e4270c3fb6 100644 (file)
@@ -122,7 +122,7 @@ fail:
                __free_page(pages[i]);
 
        drm_free_large(pages);
-       return ERR_PTR(PTR_ERR(p));
+       return ERR_CAST(p);
 }
 
 static void exynos_gem_put_pages(struct drm_gem_object *obj,
@@ -662,7 +662,7 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
         */
 
        args->pitch = args->width * ((args->bpp + 7) / 8);
-       args->size = PAGE_ALIGN(args->pitch * args->height);
+       args->size = args->pitch * args->height;
 
        exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
        if (IS_ERR(exynos_gem_obj))
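
ERR_CAST() is the idiomatic way to forward an error pointer while changing the pointer type; ERR_PTR(PTR_ERR(p)) produces the same value but takes a needless detour through a long. A short illustration (hypothetical helper name):

    struct page **pages = foo_get_pages(obj);
    if (IS_ERR(pages))
            return ERR_CAST(pages);         /* same errno, different pointer type */
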
index 8ffcdf8b9e223ffdfe7552aa1d57b4334cba19cc..3fdf0b65f47e6659b820d1fa343005b682e354b1 100644 (file)
@@ -345,7 +345,7 @@ static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev)
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
-       ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+       ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
        if (!ctx) {
                DRM_LOG_KMS("failed to alloc common hdmi context.\n");
                return -ENOMEM;
@@ -371,7 +371,6 @@ static int __devexit exynos_drm_hdmi_remove(struct platform_device *pdev)
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
        exynos_drm_subdrv_unregister(&ctx->subdrv);
-       kfree(ctx);
 
        return 0;
 }
index b89829e5043a59a67152c5cafc241265f0183f12..e1f94b746bd7e0a9419e40ce694bf55e1bff36ab 100644 (file)
@@ -29,7 +29,6 @@ static const uint32_t formats[] = {
        DRM_FORMAT_XRGB8888,
        DRM_FORMAT_ARGB8888,
        DRM_FORMAT_NV12,
-       DRM_FORMAT_NV12M,
        DRM_FORMAT_NV12MT,
 };
 
index bb1550c4dd57db37554954537ef19c61d4e513ce..537027a74fd54f06b49c1387c1650bf90abc11f1 100644 (file)
@@ -633,7 +633,7 @@ static int __devinit vidi_probe(struct platform_device *pdev)
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
-       ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+       ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;
 
@@ -673,8 +673,6 @@ static int __devexit vidi_remove(struct platform_device *pdev)
                ctx->raw_edid = NULL;
        }
 
-       kfree(ctx);
-
        return 0;
 }
 
index 409e2ec1207c3ddbe4ecae65f9fae1498fcd6410..a6aea6f3ea1ab842d8c6d26a5ecb7e606c335f0d 100644 (file)
@@ -2172,7 +2172,7 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
 
        DRM_DEBUG_KMS("HDMI resource init\n");
 
-       memset(res, 0, sizeof *res);
+       memset(res, 0, sizeof(*res));
 
        /* get clocks, power */
        res->hdmi = clk_get(dev, "hdmi");
@@ -2204,7 +2204,7 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
        clk_set_parent(res->sclk_hdmi, res->sclk_pixel);
 
        res->regul_bulk = kzalloc(ARRAY_SIZE(supply) *
-               sizeof res->regul_bulk[0], GFP_KERNEL);
+               sizeof(res->regul_bulk[0]), GFP_KERNEL);
        if (!res->regul_bulk) {
                DRM_ERROR("failed to get memory for regulators\n");
                goto fail;
@@ -2243,7 +2243,7 @@ static int hdmi_resources_cleanup(struct hdmi_context *hdata)
                clk_put(res->sclk_hdmi);
        if (!IS_ERR_OR_NULL(res->hdmi))
                clk_put(res->hdmi);
-       memset(res, 0, sizeof *res);
+       memset(res, 0, sizeof(*res));
 
        return 0;
 }
@@ -2312,11 +2312,6 @@ static int __devinit hdmi_probe(struct platform_device *pdev)
        }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               DRM_ERROR("failed to find registers\n");
-               ret = -ENOENT;
-               goto err_resource;
-       }
 
        hdata->regs = devm_request_and_ioremap(&pdev->dev, res);
        if (!hdata->regs) {
index 30fcc12f81dd943802031936dcfc9ea81b8f1548..25b97d5e5fcb44679a331b8e8e5b1a9da7d7d3ca 100644 (file)
@@ -236,11 +236,11 @@ static inline void vp_filter_set(struct mixer_resources *res,
 static void vp_default_filter(struct mixer_resources *res)
 {
        vp_filter_set(res, VP_POLY8_Y0_LL,
-               filter_y_horiz_tap8, sizeof filter_y_horiz_tap8);
+               filter_y_horiz_tap8, sizeof(filter_y_horiz_tap8));
        vp_filter_set(res, VP_POLY4_Y0_LL,
-               filter_y_vert_tap4, sizeof filter_y_vert_tap4);
+               filter_y_vert_tap4, sizeof(filter_y_vert_tap4));
        vp_filter_set(res, VP_POLY4_C0_LL,
-               filter_cr_horiz_tap4, sizeof filter_cr_horiz_tap4);
+               filter_cr_horiz_tap4, sizeof(filter_cr_horiz_tap4));
 }
 
 static void mixer_vsync_set_update(struct mixer_context *ctx, bool enable)
index 0f9b7db80f6bdc91193ef7bd914a580e2b5eec26..cf49ba5a54bf3a351b4bda14c8fd3d4daf26df73 100644 (file)
@@ -476,6 +476,7 @@ static const struct psb_offset oaktrail_regmap[2] = {
                .pos = DSPAPOS,
                .surf = DSPASURF,
                .addr = MRST_DSPABASE,
+               .base = MRST_DSPABASE,
                .status = PIPEASTAT,
                .linoff = DSPALINOFF,
                .tileoff = DSPATILEOFF,
@@ -499,6 +500,7 @@ static const struct psb_offset oaktrail_regmap[2] = {
                .pos = DSPBPOS,
                .surf = DSPBSURF,
                .addr = DSPBBASE,
+               .base = DSPBBASE,
                .status = PIPEBSTAT,
                .linoff = DSPBLINOFF,
                .tileoff = DSPBTILEOFF,
index 57d892eaaa6effc66defdbb890bb9f1b68208515..463ec6871fe998229659eaa82fb8237a57109543 100644 (file)
@@ -115,6 +115,9 @@ static const struct file_operations i810_buffer_fops = {
        .unlocked_ioctl = drm_ioctl,
        .mmap = i810_mmap_buffers,
        .fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl = drm_compat_ioctl,
+#endif
        .llseek = noop_llseek,
 };
 
index f9924ad04d0993d5d6157918082b9d4d5d3ed604..48cfcca2b350122f0b3ed5fdb7e78dee78af8dd6 100644 (file)
@@ -51,6 +51,9 @@ static const struct file_operations i810_driver_fops = {
        .mmap = drm_mmap,
        .poll = drm_poll,
        .fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl = drm_compat_ioctl,
+#endif
        .llseek = noop_llseek,
 };
 
index 9cf7dfe022b989a23e02cb8ad45cd2177f956605..914c0dfabe6048113abc150b38e06a107eb33c5b 100644 (file)
@@ -1587,6 +1587,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
        spin_lock_init(&dev_priv->irq_lock);
        spin_lock_init(&dev_priv->error_lock);
        spin_lock_init(&dev_priv->rps_lock);
+       spin_lock_init(&dev_priv->dpio_lock);
 
        if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
                dev_priv->num_pipe = 3;
index 8a3828528b9ddfa9678e7bfbf015bd7acdfdc29d..5249640cce1381c912c76ec73c6658cde12ae95f 100644 (file)
@@ -2700,9 +2700,6 @@ void intel_irq_init(struct drm_device *dev)
                        dev->driver->irq_handler = i8xx_irq_handler;
                        dev->driver->irq_uninstall = i8xx_irq_uninstall;
                } else if (INTEL_INFO(dev)->gen == 3) {
-                       /* IIR "flip pending" means done if this bit is set */
-                       I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
-
                        dev->driver->irq_preinstall = i915_irq_preinstall;
                        dev->driver->irq_postinstall = i915_irq_postinstall;
                        dev->driver->irq_uninstall = i915_irq_uninstall;
index 2dfa6cf4886b6a2a3b31b90d1df489634a30df3f..bc2ad348e5d8ce9e13c51b1c6dba019e0e2acc4e 100644 (file)
@@ -1376,7 +1376,8 @@ static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
             "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
             reg, pipe_name(pipe));
 
-       WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_PIPE_B_SELECT),
+       WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
+            && (val & DP_PIPEB_SELECT),
             "IBX PCH dp port still using transcoder B\n");
 }
 
@@ -1388,7 +1389,8 @@ static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
             "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
             reg, pipe_name(pipe));
 
-       WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_PIPE_B_SELECT),
+       WARN(HAS_PCH_IBX(dev_priv->dev) && (val & PORT_ENABLE) == 0
+            && (val & SDVO_PIPE_B_SELECT),
             "IBX PCH hdmi port still using transcoder B\n");
 }
 
index a6c426afaa7aca46144f5a3710c0ea6f10a46b83..ace757af913366db3e7cff4e670fa9c804c84d08 100644 (file)
@@ -2533,14 +2533,10 @@ intel_dp_init(struct drm_device *dev, int output_reg)
                        break;
        }
 
-       intel_dp_i2c_init(intel_dp, intel_connector, name);
-
        /* Cache some DPCD data in the eDP case */
        if (is_edp(intel_dp)) {
-               bool ret;
                struct edp_power_seq    cur, vbt;
                u32 pp_on, pp_off, pp_div;
-               struct edid *edid;
 
                pp_on = I915_READ(PCH_PP_ON_DELAYS);
                pp_off = I915_READ(PCH_PP_OFF_DELAYS);
@@ -2591,6 +2587,13 @@ intel_dp_init(struct drm_device *dev, int output_reg)
 
                DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
                              intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
+       }
+
+       intel_dp_i2c_init(intel_dp, intel_connector, name);
+
+       if (is_edp(intel_dp)) {
+               bool ret;
+               struct edid *edid;
 
                ironlake_edp_panel_vdd_on(intel_dp);
                ret = intel_dp_get_dpcd(intel_dp);
index 3df4f5fa892ad847b394c2050e8956779473afd8..e019b236986128bae46c61bf49180f7ec2502a0d 100644 (file)
@@ -162,19 +162,12 @@ static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv)
        return val;
 }
 
-u32 intel_panel_get_max_backlight(struct drm_device *dev)
+static u32 _intel_panel_get_max_backlight(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 max;
 
        max = i915_read_blc_pwm_ctl(dev_priv);
-       if (max == 0) {
-               /* XXX add code here to query mode clock or hardware clock
-                * and program max PWM appropriately.
-                */
-               pr_warn_once("fixme: max PWM is zero\n");
-               return 1;
-       }
 
        if (HAS_PCH_SPLIT(dev)) {
                max >>= 16;
@@ -188,6 +181,22 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev)
                        max *= 0xff;
        }
 
+       return max;
+}
+
+u32 intel_panel_get_max_backlight(struct drm_device *dev)
+{
+       u32 max;
+
+       max = _intel_panel_get_max_backlight(dev);
+       if (max == 0) {
+               /* XXX add code here to query mode clock or hardware clock
+                * and program max PWM appropriately.
+                */
+               pr_warn_once("fixme: max PWM is zero\n");
+               return 1;
+       }
+
        DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max);
        return max;
 }
@@ -424,7 +433,11 @@ int intel_panel_setup_backlight(struct drm_device *dev)
 
        memset(&props, 0, sizeof(props));
        props.type = BACKLIGHT_RAW;
-       props.max_brightness = intel_panel_get_max_backlight(dev);
+       props.max_brightness = _intel_panel_get_max_backlight(dev);
+       if (props.max_brightness == 0) {
+               DRM_ERROR("Failed to get maximum backlight value\n");
+               return -ENODEV;
+       }
        dev_priv->backlight =
                backlight_device_register("intel_backlight",
                                          &connector->kdev, dev,
index 1881c8c83f0e0c44ab009dfed7049235c4074d97..ba8a27b1757ad97e774fe8266432161477a3561a 100644 (file)
@@ -3672,6 +3672,9 @@ static void gen3_init_clock_gating(struct drm_device *dev)
 
        if (IS_PINEVIEW(dev))
                I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
+
+       /* IIR "flip pending" means done if this bit is set */
+       I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
 }
 
 static void i85x_init_clock_gating(struct drm_device *dev)
index d81bb0bf28850f4ea734dedabec44d8478b12d44..123afd357611a6fbbd4c235aa7449afd6ac15dff 100644 (file)
@@ -2573,7 +2573,6 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
                hotplug_mask = intel_sdvo->is_sdvob ?
                        SDVOB_HOTPLUG_INT_STATUS_I915 : SDVOC_HOTPLUG_INT_STATUS_I915;
        }
-       dev_priv->hotplug_supported_mask |= hotplug_mask;
 
        drm_encoder_helper_add(&intel_encoder->base, &intel_sdvo_helper_funcs);
 
@@ -2581,14 +2580,6 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
        if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps))
                goto err;
 
-       /* Set up hotplug command - note paranoia about contents of reply.
-        * We assume that the hardware is in a sane state, and only touch
-        * the bits we think we understand.
-        */
-       intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG,
-                            &intel_sdvo->hotplug_active, 2);
-       intel_sdvo->hotplug_active[0] &= ~0x3;
-
        if (intel_sdvo_output_setup(intel_sdvo,
                                    intel_sdvo->caps.output_flags) != true) {
                DRM_DEBUG_KMS("SDVO output failed to setup on %s\n",
@@ -2596,6 +2587,12 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
                goto err;
        }
 
+       /* Only enable the hotplug irq if we need it, to work around noisy
+        * hotplug lines.
+        */
+       if (intel_sdvo->hotplug_active[0])
+               dev_priv->hotplug_supported_mask |= hotplug_mask;
+
        intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg);
 
        /* Set the input timing to the screen. Assume always input 0. */
index ea1024d79974a0f05b91c0e763ca6929266f5a72..e5f145d2cb3bac2565051a65cfbb07a8cf206c36 100644 (file)
@@ -84,6 +84,9 @@ static const struct file_operations mgag200_driver_fops = {
        .mmap = mgag200_mmap,
        .poll = drm_poll,
        .fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl = drm_compat_ioctl,
+#endif
        .read = drm_read,
 };
 
index 69688ef5cf46802d82922046577ada8d27f3a82c..7e16dc5e64672929e80a2aecf4b6e6e2446641b8 100644 (file)
@@ -598,7 +598,7 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
        args->size = args->pitch * args->height;
        args->size = roundup(args->size, PAGE_SIZE);
 
-       ret = nouveau_gem_new(dev, args->size, 0, TTM_PL_FLAG_VRAM, 0, 0, &bo);
+       ret = nouveau_gem_new(dev, args->size, 0, NOUVEAU_GEM_DOMAIN_VRAM, 0, 0, &bo);
        if (ret)
                return ret;
 
index f429e6a8ca7aeba09b3f8ef852376ea8d360c4ee..f03490534893e4772a75b4b4731b69be6286f744 100644 (file)
@@ -115,6 +115,9 @@ nv50_gpio_init(struct drm_device *dev)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
 
+       /* initialise gpios and routing to vbios defaults */
+       nouveau_gpio_reset(dev);
+
        /* disable, and ack any pending gpio interrupts */
        nv_wr32(dev, 0xe050, 0x00000000);
        nv_wr32(dev, 0xe054, 0xffffffff);
index dac525b2994ee4bd28ff862625e7a6fe7649d2cd..8a2fc89b7763cc278c65a2ba20382165cde4a203 100644 (file)
@@ -1510,10 +1510,10 @@ nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
        case OUTPUT_DP:
                if (nv_connector->base.display_info.bpc == 6) {
                        nv_encoder->dp.datarate = mode->clock * 18 / 8;
-                       syncs |= 0x00000140;
+                       syncs |= 0x00000002 << 6;
                } else {
                        nv_encoder->dp.datarate = mode->clock * 24 / 8;
-                       syncs |= 0x00000180;
+                       syncs |= 0x00000005 << 6;
                }
 
                if (nv_encoder->dcb->sorconf.link & 1)
index 2817101fb167eb1f70ac36d88e2f4f1c58dd2427..e721e3087b99d88556c39701605faf37e99fbc8b 100644 (file)
@@ -1479,14 +1479,98 @@ static void radeon_legacy_atom_fixup(struct drm_crtc *crtc)
        }
 }
 
+/**
+ * radeon_get_pll_use_mask - look up a mask of which pplls are in use
+ *
+ * @crtc: drm crtc
+ *
+ * Returns the mask of which PPLLs (Pixel PLLs) are in use.
+ */
+static u32 radeon_get_pll_use_mask(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_crtc *test_crtc;
+       struct radeon_crtc *radeon_test_crtc;
+       u32 pll_in_use = 0;
+
+       list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
+               if (crtc == test_crtc)
+                       continue;
+
+               radeon_test_crtc = to_radeon_crtc(test_crtc);
+               if (radeon_test_crtc->pll_id != ATOM_PPLL_INVALID)
+                       pll_in_use |= (1 << radeon_test_crtc->pll_id);
+       }
+       return pll_in_use;
+}
+
+/**
+ * radeon_get_shared_dp_ppll - return the PPLL used by another crtc for DP
+ *
+ * @crtc: drm crtc
+ *
+ * Returns the PPLL (Pixel PLL) used by another crtc/encoder which is
+ * also in DP mode.  For DP, a single PPLL can be used for all DP
+ * crtcs/encoders.
+ */
+static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_encoder *test_encoder;
+       struct radeon_crtc *radeon_test_crtc;
+
+       list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
+               if (test_encoder->crtc && (test_encoder->crtc != crtc)) {
+                       if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_encoder))) {
+                               /* for DP use the same PLL for all */
+                               radeon_test_crtc = to_radeon_crtc(test_encoder->crtc);
+                               if (radeon_test_crtc->pll_id != ATOM_PPLL_INVALID)
+                                       return radeon_test_crtc->pll_id;
+                       }
+               }
+       }
+       return ATOM_PPLL_INVALID;
+}
+
+/**
+ * radeon_atom_pick_pll - Allocate a PPLL for use by the crtc.
+ *
+ * @crtc: drm crtc
+ *
+ * Returns the PPLL (Pixel PLL) to be used by the crtc.  For DP monitors
+ * a single PPLL can be used for all DP crtcs/encoders.  For non-DP
+ * monitors a dedicated PPLL must be used.  If a particular board has
+ * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
+ * as there is no need to program the PLL itself.  If we are not able to
+ * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
+ * avoid messing up an existing monitor.
+ *
+ * Asic specific PLL information
+ *
+ * DCE 6.1
+ * - PPLL2 is only available to UNIPHYA (both DP and non-DP)
+ * - PPLL0, PPLL1 are available for UNIPHYB/C/D/E/F (both DP and non-DP)
+ *
+ * DCE 6.0
+ * - PPLL0 is available to all UNIPHY (DP only)
+ * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
+ *
+ * DCE 5.0
+ * - DCPLL is available to all UNIPHY (DP only)
+ * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
+ *
+ * DCE 3.0/4.0/4.1
+ * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
+ *
+ */
 static int radeon_atom_pick_pll(struct drm_crtc *crtc)
 {
        struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
        struct drm_device *dev = crtc->dev;
        struct radeon_device *rdev = dev->dev_private;
        struct drm_encoder *test_encoder;
-       struct drm_crtc *test_crtc;
-       uint32_t pll_in_use = 0;
+       u32 pll_in_use;
+       int pll;
 
        if (ASIC_IS_DCE61(rdev)) {
                list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
@@ -1498,32 +1582,40 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
 
                                if ((test_radeon_encoder->encoder_id ==
                                     ENCODER_OBJECT_ID_INTERNAL_UNIPHY) &&
-                                   (dig->linkb == false)) /* UNIPHY A uses PPLL2 */
+                                   (dig->linkb == false))
+                                       /* UNIPHY A uses PPLL2 */
                                        return ATOM_PPLL2;
+                               else if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_encoder))) {
+                                       /* UNIPHY B/C/D/E/F */
+                                       if (rdev->clock.dp_extclk)
+                                               /* skip PPLL programming if using ext clock */
+                                               return ATOM_PPLL_INVALID;
+                                       else {
+                                               /* use the same PPLL for all DP monitors */
+                                               pll = radeon_get_shared_dp_ppll(crtc);
+                                               if (pll != ATOM_PPLL_INVALID)
+                                                       return pll;
+                                       }
+                               }
+                               break;
                        }
                }
                /* UNIPHY B/C/D/E/F */
-               list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
-                       struct radeon_crtc *radeon_test_crtc;
-
-                       if (crtc == test_crtc)
-                               continue;
-
-                       radeon_test_crtc = to_radeon_crtc(test_crtc);
-                       if ((radeon_test_crtc->pll_id == ATOM_PPLL0) ||
-                           (radeon_test_crtc->pll_id == ATOM_PPLL1))
-                               pll_in_use |= (1 << radeon_test_crtc->pll_id);
-               }
-               if (!(pll_in_use & 4))
+               pll_in_use = radeon_get_pll_use_mask(crtc);
+               if (!(pll_in_use & (1 << ATOM_PPLL0)))
                        return ATOM_PPLL0;
-               return ATOM_PPLL1;
+               if (!(pll_in_use & (1 << ATOM_PPLL1)))
+                       return ATOM_PPLL1;
+               DRM_ERROR("unable to allocate a PPLL\n");
+               return ATOM_PPLL_INVALID;
        } else if (ASIC_IS_DCE4(rdev)) {
                list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
                        if (test_encoder->crtc && (test_encoder->crtc == crtc)) {
                                /* in DP mode, the DP ref clock can come from PPLL, DCPLL, or ext clock,
                                 * depending on the asic:
                                 * DCE4: PPLL or ext clock
-                                * DCE5: DCPLL or ext clock
+                                * DCE5: PPLL, DCPLL, or ext clock
+                                * DCE6: PPLL, PPLL0, or ext clock
                                 *
                                 * Setting ATOM_PPLL_INVALID will cause SetPixelClock to skip
                                 * PPLL/DCPLL programming and only program the DP DTO for the
@@ -1531,31 +1623,34 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
                                 */
                                if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_encoder))) {
                                        if (rdev->clock.dp_extclk)
+                                               /* skip PPLL programming if using ext clock */
                                                return ATOM_PPLL_INVALID;
                                        else if (ASIC_IS_DCE6(rdev))
+                                               /* use PPLL0 for all DP */
                                                return ATOM_PPLL0;
                                        else if (ASIC_IS_DCE5(rdev))
+                                               /* use DCPLL for all DP */
                                                return ATOM_DCPLL;
+                                       else {
+                                               /* use the same PPLL for all DP monitors */
+                                               pll = radeon_get_shared_dp_ppll(crtc);
+                                               if (pll != ATOM_PPLL_INVALID)
+                                                       return pll;
+                                       }
                                }
+                               break;
                        }
                }
-
-               /* otherwise, pick one of the plls */
-               list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
-                       struct radeon_crtc *radeon_test_crtc;
-
-                       if (crtc == test_crtc)
-                               continue;
-
-                       radeon_test_crtc = to_radeon_crtc(test_crtc);
-                       if ((radeon_test_crtc->pll_id >= ATOM_PPLL1) &&
-                           (radeon_test_crtc->pll_id <= ATOM_PPLL2))
-                               pll_in_use |= (1 << radeon_test_crtc->pll_id);
-               }
-               if (!(pll_in_use & 1))
+               /* all other cases */
+               pll_in_use = radeon_get_pll_use_mask(crtc);
+               if (!(pll_in_use & (1 << ATOM_PPLL2)))
+                       return ATOM_PPLL2;
+               if (!(pll_in_use & (1 << ATOM_PPLL1)))
                        return ATOM_PPLL1;
-               return ATOM_PPLL2;
+               DRM_ERROR("unable to allocate a PPLL\n");
+               return ATOM_PPLL_INVALID;
        } else
+               /* use PPLL1 or PPLL2 */
                return radeon_crtc->crtc_id;
 
 }
@@ -1697,7 +1792,7 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
                break;
        }
 done:
-       radeon_crtc->pll_id = -1;
+       radeon_crtc->pll_id = ATOM_PPLL_INVALID;
 }
 
 static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
@@ -1746,6 +1841,6 @@ void radeon_atombios_init_crtc(struct drm_device *dev,
                else
                        radeon_crtc->crtc_offset = 0;
        }
-       radeon_crtc->pll_id = -1;
+       radeon_crtc->pll_id = ATOM_PPLL_INVALID;
        drm_crtc_helper_add(&radeon_crtc->base, &atombios_helper_funcs);
 }
index 7b737b9339ad41e136a96237817ef099bfae3c10..2a59375dbe5205f64764771de6abfe7184a90be9 100644 (file)
@@ -131,7 +131,7 @@ int radeon_fence_emit(struct radeon_device *rdev,
  */
 void radeon_fence_process(struct radeon_device *rdev, int ring)
 {
-       uint64_t seq, last_seq;
+       uint64_t seq, last_seq, last_emitted;
        unsigned count_loop = 0;
        bool wake = false;
 
@@ -158,13 +158,15 @@ void radeon_fence_process(struct radeon_device *rdev, int ring)
         */
        last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
        do {
+               last_emitted = rdev->fence_drv[ring].sync_seq[ring];
                seq = radeon_fence_read(rdev, ring);
                seq |= last_seq & 0xffffffff00000000LL;
                if (seq < last_seq) {
-                       seq += 0x100000000LL;
+                       seq &= 0xffffffff;
+                       seq |= last_emitted & 0xffffffff00000000LL;
                }
 
-               if (seq == last_seq) {
+               if (seq <= last_seq || seq > last_emitted) {
                        break;
                }
                /* If we loop over we don't want to return without
index d31d4cca9a4c7425609cf92d9e8fcb5a512cc4f0..c5a164337bd5deaf7fd85f15b94f0f8b21fbba88 100644 (file)
@@ -43,6 +43,9 @@ static const struct file_operations savage_driver_fops = {
        .mmap = drm_mmap,
        .poll = drm_poll,
        .fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl = drm_compat_ioctl,
+#endif
        .llseek = noop_llseek,
 };
 
index 7f119870147c04cee307eaded5e6bf1abfba2bff..867dc03000e62007f470fc963706369d7411b90c 100644 (file)
@@ -74,6 +74,9 @@ static const struct file_operations sis_driver_fops = {
        .mmap = drm_mmap,
        .poll = drm_poll,
        .fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl = drm_compat_ioctl,
+#endif
        .llseek = noop_llseek,
 };
 
index 90f6b13acfac416270ac5abb70fa919cf66b611a..a7f4d6bd1330de6ad9e5878be70019d3e6ebf2df 100644 (file)
@@ -49,6 +49,9 @@ static const struct file_operations tdfx_driver_fops = {
        .mmap = drm_mmap,
        .poll = drm_poll,
        .fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl = drm_compat_ioctl,
+#endif
        .llseek = noop_llseek,
 };
 
index 6e52069894b35d91037474521e5ebf6e2f157e98..9f84128505bb420703745d426f0cdba4c8a46ab6 100644 (file)
@@ -66,6 +66,9 @@ static const struct file_operations udl_driver_fops = {
        .unlocked_ioctl = drm_ioctl,
        .release = drm_release,
        .fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl = drm_compat_ioctl,
+#endif
        .llseek = noop_llseek,
 };
 
index e927b4c052f52a4ed2e73a296354a0e84c849f20..af1b914b17e399a8de2265bd8cbdff32ed553a06 100644 (file)
@@ -65,6 +65,9 @@ static const struct file_operations via_driver_fops = {
        .mmap = drm_mmap,
        .poll = drm_poll,
        .fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl = drm_compat_ioctl,
+#endif
        .llseek = noop_llseek,
 };
 
index 794ff67c5701386ce781abe5c0f91e42de150e26..b71bcd0bfbbf65a60dea1ea47beef41daddb7d8c 100644 (file)
@@ -12,3 +12,11 @@ config DRM_VMWGFX
          This is a KMS enabled DRM driver for the VMware SVGA2
          virtual hardware.
          The compiled module will be called "vmwgfx.ko".
+
+config DRM_VMWGFX_FBCON
+       depends on DRM_VMWGFX
+       bool "Enable framebuffer console under vmwgfx by default"
+       help
+          Choose this option if you are shipping a new vmwgfx
+          userspace driver that supports using the kernel driver.
+
index 4d9edead01acdbcb900a3fdca87d5ed08cdab682..ba2c35dbf10e3a7c90095d3fce4265dd3cfd14ff 100644 (file)
@@ -182,8 +182,9 @@ static struct pci_device_id vmw_pci_id_list[] = {
        {0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
        {0, 0, 0}
 };
+MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);
 
-static int enable_fbdev;
+static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
 
 static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
 static void vmw_master_init(struct vmw_master *);
@@ -1154,6 +1155,11 @@ static struct drm_driver driver = {
        .open = vmw_driver_open,
        .preclose = vmw_preclose,
        .postclose = vmw_postclose,
+
+       .dumb_create = vmw_dumb_create,
+       .dumb_map_offset = vmw_dumb_map_offset,
+       .dumb_destroy = vmw_dumb_destroy,
+
        .fops = &vmwgfx_driver_fops,
        .name = VMWGFX_DRIVER_NAME,
        .desc = VMWGFX_DRIVER_DESC,
index d0f2c079ee2732d62f064b2667747412b5b6fb1c..29c984ff7f23aed1012e70d7e3e0744c1d1fa026 100644 (file)
@@ -645,6 +645,16 @@ int vmw_kms_readback(struct vmw_private *dev_priv,
 int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv);
 
+int vmw_dumb_create(struct drm_file *file_priv,
+                   struct drm_device *dev,
+                   struct drm_mode_create_dumb *args);
+
+int vmw_dumb_map_offset(struct drm_file *file_priv,
+                       struct drm_device *dev, uint32_t handle,
+                       uint64_t *offset);
+int vmw_dumb_destroy(struct drm_file *file_priv,
+                    struct drm_device *dev,
+                    uint32_t handle);
 /**
  * Overlay control - vmwgfx_overlay.c
  */
index 22bf9a21ec7137a38735feea3c131ce72f156516..2c6ffe0e2c07828bc7b07e3a9bf55b71e7d438f6 100644 (file)
@@ -1917,3 +1917,76 @@ err_ref:
        vmw_resource_unreference(&res);
        return ret;
 }
+
+
+int vmw_dumb_create(struct drm_file *file_priv,
+                   struct drm_device *dev,
+                   struct drm_mode_create_dumb *args)
+{
+       struct vmw_private *dev_priv = vmw_priv(dev);
+       struct vmw_master *vmaster = vmw_master(file_priv->master);
+       struct vmw_user_dma_buffer *vmw_user_bo;
+       struct ttm_buffer_object *tmp;
+       int ret;
+
+       args->pitch = args->width * ((args->bpp + 7) / 8);
+       args->size = args->pitch * args->height;
+
+       vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
+       if (vmw_user_bo == NULL)
+               return -ENOMEM;
+
+       ret = ttm_read_lock(&vmaster->lock, true);
+       if (ret != 0) {
+               kfree(vmw_user_bo);
+               return ret;
+       }
+
+       ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, args->size,
+                             &vmw_vram_sys_placement, true,
+                             &vmw_user_dmabuf_destroy);
+       if (ret != 0)
+               goto out_no_dmabuf;
+
+       tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
+       ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
+                                  &vmw_user_bo->base,
+                                  false,
+                                  ttm_buffer_type,
+                                  &vmw_user_dmabuf_release, NULL);
+       if (unlikely(ret != 0))
+               goto out_no_base_object;
+
+       args->handle = vmw_user_bo->base.hash.key;
+
+out_no_base_object:
+       ttm_bo_unref(&tmp);
+out_no_dmabuf:
+       ttm_read_unlock(&vmaster->lock);
+       return ret;
+}
+
+int vmw_dumb_map_offset(struct drm_file *file_priv,
+                       struct drm_device *dev, uint32_t handle,
+                       uint64_t *offset)
+{
+       struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+       struct vmw_dma_buffer *out_buf;
+       int ret;
+
+       ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf);
+       if (ret != 0)
+               return -EINVAL;
+
+       *offset = out_buf->base.addr_space_offset;
+       vmw_dmabuf_unreference(&out_buf);
+       return 0;
+}
+
+int vmw_dumb_destroy(struct drm_file *file_priv,
+                    struct drm_device *dev,
+                    uint32_t handle)
+{
+       return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
+                                        handle, TTM_REF_USAGE);
+}
index 8bf8a64e511543989c41624338304d0f11f0c59b..8bcd168fffaebbf80808fa9ee9c02e42384eb1f5 100644 (file)
@@ -996,7 +996,8 @@ static void hid_process_event(struct hid_device *hid, struct hid_field *field,
        struct hid_driver *hdrv = hid->driver;
        int ret;
 
-       hid_dump_input(hid, usage, value);
+       if (!list_empty(&hid->debug_list))
+               hid_dump_input(hid, usage, value);
 
        if (hdrv && hdrv->event && hid_match_usage(hid, usage)) {
                ret = hdrv->event(hid, field, usage, value);
@@ -1558,7 +1559,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPKBD) },
+#if IS_ENABLED(CONFIG_HID_LENOVO_TPKBD)
+       { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPKBD) },
+#endif
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2) },
index 0f9c146fc00d7391932bcaef9df70e4fbc55541e..4d524b5f52f5631179e9092d6ee471522d3735a3 100644 (file)
@@ -439,7 +439,7 @@ static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev)
        struct dj_report *dj_report;
        int retval;
 
-       dj_report = kzalloc(sizeof(dj_report), GFP_KERNEL);
+       dj_report = kzalloc(sizeof(struct dj_report), GFP_KERNEL);
        if (!dj_report)
                return -ENOMEM;
        dj_report->report_id = REPORT_ID_DJ_SHORT;
@@ -456,7 +456,7 @@ static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev,
        struct dj_report *dj_report;
        int retval;
 
-       dj_report = kzalloc(sizeof(dj_report), GFP_KERNEL);
+       dj_report = kzalloc(sizeof(struct dj_report), GFP_KERNEL);
        if (!dj_report)
                return -ENOMEM;
        dj_report->report_id = REPORT_ID_DJ_SHORT;
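
The two hunks above fix a classic undersized allocation: sizeof(dj_report) is the size of a pointer (4 or 8 bytes), not of the structure it points to. Writing sizeof(struct dj_report) is correct; the sizeof(*ptr) form is the usual defensive spelling because it stays correct if the variable's type ever changes. Illustration with a hypothetical struct:

    struct report *r;

    r = kzalloc(sizeof(r), GFP_KERNEL);     /* bug: allocates a pointer-sized buffer */
    r = kzalloc(sizeof(*r), GFP_KERNEL);    /* correct: allocates sizeof(struct report) */
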
index 903eef3d3e10034f8e36974ba4e95ee746438702..991e85c7325c849d4e7c287afe28364a8e3351e6 100644 (file)
@@ -70,6 +70,7 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
+       { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2, HID_QUIRK_NO_INIT_REPORTS },
index 7f3f4a385729375c002409387d157f3565b30e04..602148299f68db03a81683ab95c14e39614e5d8d 100644 (file)
@@ -69,22 +69,6 @@ struct ina2xx_data {
        u16 regs[INA2XX_MAX_REGISTERS];
 };
 
-int ina2xx_read_word(struct i2c_client *client, int reg)
-{
-       int val = i2c_smbus_read_word_data(client, reg);
-       if (unlikely(val < 0)) {
-               dev_dbg(&client->dev,
-                       "Failed to read register: %d\n", reg);
-               return val;
-       }
-       return be16_to_cpu(val);
-}
-
-void ina2xx_write_word(struct i2c_client *client, int reg, int data)
-{
-       i2c_smbus_write_word_data(client, reg, cpu_to_be16(data));
-}
-
 static struct ina2xx_data *ina2xx_update_device(struct device *dev)
 {
        struct i2c_client *client = to_i2c_client(dev);
@@ -102,7 +86,7 @@ static struct ina2xx_data *ina2xx_update_device(struct device *dev)
 
                /* Read all registers */
                for (i = 0; i < data->registers; i++) {
-                       int rv = ina2xx_read_word(client, i);
+                       int rv = i2c_smbus_read_word_swapped(client, i);
                        if (rv < 0) {
                                ret = ERR_PTR(rv);
                                goto abort;
@@ -279,22 +263,26 @@ static int ina2xx_probe(struct i2c_client *client,
        switch (data->kind) {
        case ina219:
                /* device configuration */
-               ina2xx_write_word(client, INA2XX_CONFIG, INA219_CONFIG_DEFAULT);
+               i2c_smbus_write_word_swapped(client, INA2XX_CONFIG,
+                                            INA219_CONFIG_DEFAULT);
 
                /* set current LSB to 1mA, shunt is in uOhms */
                /* (equation 13 in datasheet) */
-               ina2xx_write_word(client, INA2XX_CALIBRATION, 40960000 / shunt);
+               i2c_smbus_write_word_swapped(client, INA2XX_CALIBRATION,
+                                            40960000 / shunt);
                dev_info(&client->dev,
                         "power monitor INA219 (Rshunt = %li uOhm)\n", shunt);
                data->registers = INA219_REGISTERS;
                break;
        case ina226:
                /* device configuration */
-               ina2xx_write_word(client, INA2XX_CONFIG, INA226_CONFIG_DEFAULT);
+               i2c_smbus_write_word_swapped(client, INA2XX_CONFIG,
+                                            INA226_CONFIG_DEFAULT);
 
                /* set current LSB to 1mA, shunt is in uOhms */
                /* (equation 1 in datasheet)*/
-               ina2xx_write_word(client, INA2XX_CALIBRATION, 5120000 / shunt);
+               i2c_smbus_write_word_swapped(client, INA2XX_CALIBRATION,
+                                            5120000 / shunt);
                dev_info(&client->dev,
                         "power monitor INA226 (Rshunt = %li uOhm)\n", shunt);
                data->registers = INA226_REGISTERS;
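
The removed ina2xx_read_word()/ina2xx_write_word() wrappers duplicated helpers the I2C core already exports: i2c_smbus_read_word_swapped() and i2c_smbus_write_word_swapped() perform the SMBus word transfer and the byte swap for big-endian device registers in one call. Minimal usage sketch (hypothetical register macros):

    int ret, val;

    val = i2c_smbus_read_word_swapped(client, FOO_REG_CONFIG);
    if (val < 0)
            return val;             /* negative errno from the I2C core */

    ret = i2c_smbus_write_word_swapped(client, FOO_REG_CALIB, calib);
    if (ret < 0)
            return ret;
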
index 0018c7dd0097de5045f646d98e715713ea7edba4..1a174f0a3cdeb9bd854d13fa61171a63c33e41d7 100644 (file)
@@ -44,12 +44,13 @@ static ssize_t madc_read(struct device *dev,
                         struct device_attribute *devattr, char *buf)
 {
        struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
-       struct twl4030_madc_request req;
+       struct twl4030_madc_request req = {
+               .channels = 1 << attr->index,
+               .method = TWL4030_MADC_SW2,
+               .type = TWL4030_MADC_WAIT,
+       };
        long val;
 
-       req.channels = (1 << attr->index);
-       req.method = TWL4030_MADC_SW2;
-       req.func_cb = NULL;
        val = twl4030_madc_conversion(&req);
        if (val < 0)
                return val;
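
The rewrite above replaces field-by-field assignment of the on-stack request with a designated initializer; every member not named in the initializer (func_cb among them) is implicitly zeroed, so no field reaches twl4030_madc_conversion() carrying stack garbage. A minimal sketch with a hypothetical structure:

    struct foo_request {
            u16  channels;
            u8   method;
            u8   type;
            void (*func_cb)(int len, int chan, int *buf);
    };

    /* func_cb is not listed, so it is implicitly initialized to NULL. */
    struct foo_request req = {
            .channels = BIT(3),
            .method   = 2,
            .type     = 1,
    };
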
index 73133b1063f012416d2a957f3fc2432ace395439..6f5f98d69af7c26b2fd7b895106e11745ce7d155 100644 (file)
@@ -476,17 +476,17 @@ static int pca_init(struct i2c_adapter *adap)
                /* To avoid integer overflow, use clock/100 for calculations */
                clock = pca_clock(pca_data) / 100;
 
-               if (pca_data->i2c_clock > 10000) {
+               if (pca_data->i2c_clock > 1000000) {
                        mode = I2C_PCA_MODE_TURBO;
                        min_tlow = 14;
                        min_thi  = 5;
                        raise_fall_time = 22; /* Raise 11e-8s, Fall 11e-8s */
-               } else if (pca_data->i2c_clock > 4000) {
+               } else if (pca_data->i2c_clock > 400000) {
                        mode = I2C_PCA_MODE_FASTP;
                        min_tlow = 17;
                        min_thi  = 9;
                        raise_fall_time = 22; /* Raise 11e-8s, Fall 11e-8s */
-               } else if (pca_data->i2c_clock > 1000) {
+               } else if (pca_data->i2c_clock > 100000) {
                        mode = I2C_PCA_MODE_FAST;
                        min_tlow = 44;
                        min_thi  = 20;
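
Editor's note: the threshold fix above reflects that i2c_clock is given in plain Hz; only the timing arithmetic divides the bus clock by 100 to avoid 32-bit overflow, so the mode cut-offs must not be pre-divided. A hedged sketch of the selection, assuming the usual I2C_PCA_MODE_* constants from <linux/i2c-algo-pca.h>:

    #include <linux/i2c-algo-pca.h>

    /* Sketch only: pick the PCA9665 operating mode from a frequency in Hz */
    static int pca_mode_for_hz(unsigned int i2c_clock_hz)
    {
            if (i2c_clock_hz > 1000000)
                    return I2C_PCA_MODE_TURBO;      /* above 1 MHz */
            if (i2c_clock_hz > 400000)
                    return I2C_PCA_MODE_FASTP;      /* above 400 kHz */
            if (i2c_clock_hz > 100000)
                    return I2C_PCA_MODE_FAST;       /* above 100 kHz */
            return I2C_PCA_MODE_STD;
    }
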
index b4aaa1bd6728503b629acea078f3021fc148115a..970a1612e795566f007ccebe2b6ae811131694c9 100644 (file)
@@ -104,6 +104,7 @@ config I2C_I801
            DH89xxCC (PCH)
            Panther Point (PCH)
            Lynx Point (PCH)
+           Lynx Point-LP (PCH)
 
          This driver can also be built as a module.  If so, the module
          will be called i2c-i801.
@@ -354,9 +355,13 @@ config I2C_DAVINCI
          devices such as DaVinci NIC.
          For details please see http://www.ti.com/davinci
 
+config I2C_DESIGNWARE_CORE
+       tristate
+
 config I2C_DESIGNWARE_PLATFORM
        tristate "Synopsys DesignWare Platform"
        depends on HAVE_CLK
+       select I2C_DESIGNWARE_CORE
        help
          If you say yes to this option, support will be included for the
          Synopsys DesignWare I2C adapter. Only master mode is supported.
@@ -367,6 +372,7 @@ config I2C_DESIGNWARE_PLATFORM
 config I2C_DESIGNWARE_PCI
        tristate "Synopsys DesignWare PCI"
        depends on PCI
+       select I2C_DESIGNWARE_CORE
        help
          If you say yes to this option, support will be included for the
          Synopsys DesignWare I2C adapter. Only master mode is supported.
index ce3c2be7fb40a6cb453a92a9cc1eb89da510383f..37c4182cc98bb28520eeb1ce579bb3b601cffc01 100644 (file)
@@ -33,10 +33,11 @@ obj-$(CONFIG_I2C_AU1550)    += i2c-au1550.o
 obj-$(CONFIG_I2C_BLACKFIN_TWI) += i2c-bfin-twi.o
 obj-$(CONFIG_I2C_CPM)          += i2c-cpm.o
 obj-$(CONFIG_I2C_DAVINCI)      += i2c-davinci.o
+obj-$(CONFIG_I2C_DESIGNWARE_CORE)      += i2c-designware-core.o
 obj-$(CONFIG_I2C_DESIGNWARE_PLATFORM)  += i2c-designware-platform.o
-i2c-designware-platform-objs := i2c-designware-platdrv.o i2c-designware-core.o
+i2c-designware-platform-objs := i2c-designware-platdrv.o
 obj-$(CONFIG_I2C_DESIGNWARE_PCI)       += i2c-designware-pci.o
-i2c-designware-pci-objs := i2c-designware-pcidrv.o i2c-designware-core.o
+i2c-designware-pci-objs := i2c-designware-pcidrv.o
 obj-$(CONFIG_I2C_EG20T)                += i2c-eg20t.o
 obj-$(CONFIG_I2C_GPIO)         += i2c-gpio.o
 obj-$(CONFIG_I2C_HIGHLANDER)   += i2c-highlander.o
index 1e48bec80edfb08a0628cc816004c1955075fc42..7b8ebbefb581156ee8dd795cd6c4d458e37367f7 100644 (file)
@@ -25,6 +25,7 @@
  * ----------------------------------------------------------------------------
  *
  */
+#include <linux/export.h>
 #include <linux/clk.h>
 #include <linux/errno.h>
 #include <linux/err.h>
@@ -316,6 +317,7 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
        dw_writel(dev, dev->master_cfg , DW_IC_CON);
        return 0;
 }
+EXPORT_SYMBOL_GPL(i2c_dw_init);
 
 /*
  * Waiting for bus not busy
@@ -568,12 +570,14 @@ done:
 
        return ret;
 }
+EXPORT_SYMBOL_GPL(i2c_dw_xfer);
 
 u32 i2c_dw_func(struct i2c_adapter *adap)
 {
        struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
        return dev->functionality;
 }
+EXPORT_SYMBOL_GPL(i2c_dw_func);
 
 static u32 i2c_dw_read_clear_intrbits(struct dw_i2c_dev *dev)
 {
@@ -678,17 +682,20 @@ tx_aborted:
 
        return IRQ_HANDLED;
 }
+EXPORT_SYMBOL_GPL(i2c_dw_isr);
 
 void i2c_dw_enable(struct dw_i2c_dev *dev)
 {
        /* Enable the adapter */
        dw_writel(dev, 1, DW_IC_ENABLE);
 }
+EXPORT_SYMBOL_GPL(i2c_dw_enable);
 
 u32 i2c_dw_is_enabled(struct dw_i2c_dev *dev)
 {
        return dw_readl(dev, DW_IC_ENABLE);
 }
+EXPORT_SYMBOL_GPL(i2c_dw_is_enabled);
 
 void i2c_dw_disable(struct dw_i2c_dev *dev)
 {
@@ -699,18 +706,22 @@ void i2c_dw_disable(struct dw_i2c_dev *dev)
        dw_writel(dev, 0, DW_IC_INTR_MASK);
        dw_readl(dev, DW_IC_CLR_INTR);
 }
+EXPORT_SYMBOL_GPL(i2c_dw_disable);
 
 void i2c_dw_clear_int(struct dw_i2c_dev *dev)
 {
        dw_readl(dev, DW_IC_CLR_INTR);
 }
+EXPORT_SYMBOL_GPL(i2c_dw_clear_int);
 
 void i2c_dw_disable_int(struct dw_i2c_dev *dev)
 {
        dw_writel(dev, 0, DW_IC_INTR_MASK);
 }
+EXPORT_SYMBOL_GPL(i2c_dw_disable_int);
 
 u32 i2c_dw_read_comp_param(struct dw_i2c_dev *dev)
 {
        return dw_readl(dev, DW_IC_COMP_PARAM_1);
 }
+EXPORT_SYMBOL_GPL(i2c_dw_read_comp_param);
index 898dcf9c7adeaac26d932b6e8c6a6e8d90e9be66..33e9b0c09af208762f7f8f58b4c3895798618233 100644 (file)
@@ -52,6 +52,7 @@
   DH89xxCC (PCH)        0x2330     32     hard     yes     yes     yes
   Panther Point (PCH)   0x1e22     32     hard     yes     yes     yes
   Lynx Point (PCH)      0x8c22     32     hard     yes     yes     yes
+  Lynx Point-LP (PCH)   0x9c22     32     hard     yes     yes     yes
 
   Features supported by this driver:
   Software PEC                     no
 #define PCI_DEVICE_ID_INTEL_DH89XXCC_SMBUS     0x2330
 #define PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS        0x3b30
 #define PCI_DEVICE_ID_INTEL_LYNXPOINT_SMBUS    0x8c22
+#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_SMBUS 0x9c22
 
 struct i801_priv {
        struct i2c_adapter adapter;
@@ -771,6 +773,7 @@ static DEFINE_PCI_DEVICE_TABLE(i801_ids) = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DH89XXCC_SMBUS) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PANTHERPOINT_SMBUS) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNXPOINT_SMBUS) },
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_SMBUS) },
        { 0, }
 };
 
index 088c5c1ed17dfe831c4345ee8f02dd0ef4e1c82d..51f05b8520edb3f95b983d5002859afd386586e4 100644 (file)
@@ -365,10 +365,6 @@ static int mxs_i2c_get_ofdata(struct mxs_i2c_dev *i2c)
        struct device_node *node = dev->of_node;
        int ret;
 
-       if (!node)
-               return -EINVAL;
-
-       i2c->speed = &mxs_i2c_95kHz_config;
        ret = of_property_read_u32(node, "clock-frequency", &speed);
        if (ret)
                dev_warn(dev, "No I2C speed selected, using 100kHz\n");
@@ -419,10 +415,13 @@ static int __devinit mxs_i2c_probe(struct platform_device *pdev)
                return err;
 
        i2c->dev = dev;
+       i2c->speed = &mxs_i2c_95kHz_config;
 
-       err = mxs_i2c_get_ofdata(i2c);
-       if (err)
-               return err;
+       if (dev->of_node) {
+               err = mxs_i2c_get_ofdata(i2c);
+               if (err)
+                       return err;
+       }
 
        platform_set_drvdata(pdev, i2c);
 
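
Editor's note: with the hunks above, the default 95 kHz timing is applied unconditionally in probe, and device-tree properties only override it when an of_node is actually present, so non-DT probing keeps working. A small sketch of that optional-DT pattern (generic names, not the driver's):

    #include <linux/device.h>
    #include <linux/of.h>

    /* Sketch only: apply a default, let DT override it if a node exists */
    static void apply_clock_config(struct device *dev, u32 *speed_hz)
    {
            *speed_hz = 100000;     /* default */

            if (dev->of_node &&
                of_property_read_u32(dev->of_node, "clock-frequency", speed_hz))
                    dev_warn(dev, "no clock-frequency property, keeping default\n");
    }
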
index 5d54416770b01e7816cc85cd7dcbf403bf407442..8488bddfe46596109249edd242a3ad0ebc7cfe8b 100644 (file)
@@ -48,8 +48,9 @@ enum {
        mcntrl_afie = 0x00000002,
        mcntrl_naie = 0x00000004,
        mcntrl_drmie = 0x00000008,
-       mcntrl_daie = 0x00000020,
-       mcntrl_rffie = 0x00000040,
+       mcntrl_drsie = 0x00000010,
+       mcntrl_rffie = 0x00000020,
+       mcntrl_daie = 0x00000040,
        mcntrl_tffie = 0x00000080,
        mcntrl_reset = 0x00000100,
        mcntrl_cdbmode = 0x00000400,
@@ -290,31 +291,37 @@ static int i2c_pnx_master_rcv(struct i2c_pnx_algo_data *alg_data)
         * or we didn't 'ask' for it yet.
         */
        if (ioread32(I2C_REG_STS(alg_data)) & mstatus_rfe) {
-               dev_dbg(&alg_data->adapter.dev,
-                       "%s(): Write dummy data to fill Rx-fifo...\n",
-                       __func__);
+               /* 'Asking' is done asynchronously, e.g. dummy TX of several
+                * bytes is done before the first actual RX arrives in FIFO.
+                * Therefore, ordered bytes (via TX) are counted separately.
+                */
+               if (alg_data->mif.order) {
+                       dev_dbg(&alg_data->adapter.dev,
+                               "%s(): Write dummy data to fill Rx-fifo...\n",
+                               __func__);
 
-               if (alg_data->mif.len == 1) {
-                       /* Last byte, do not acknowledge next rcv. */
-                       val |= stop_bit;
+                       if (alg_data->mif.order == 1) {
+                               /* Last byte, do not acknowledge next rcv. */
+                               val |= stop_bit;
+
+                               /*
+                                * Enable interrupt RFDAIE (data in Rx fifo),
+                                * and disable DRMIE (need data for Tx)
+                                */
+                               ctl = ioread32(I2C_REG_CTL(alg_data));
+                               ctl |= mcntrl_rffie | mcntrl_daie;
+                               ctl &= ~mcntrl_drmie;
+                               iowrite32(ctl, I2C_REG_CTL(alg_data));
+                       }
 
                        /*
-                        * Enable interrupt RFDAIE (data in Rx fifo),
-                        * and disable DRMIE (need data for Tx)
+                        * Now we'll 'ask' for data:
+                        * For each byte we want to receive, we must
+                        * write a (dummy) byte to the Tx-FIFO.
                         */
-                       ctl = ioread32(I2C_REG_CTL(alg_data));
-                       ctl |= mcntrl_rffie | mcntrl_daie;
-                       ctl &= ~mcntrl_drmie;
-                       iowrite32(ctl, I2C_REG_CTL(alg_data));
+                       iowrite32(val, I2C_REG_TX(alg_data));
+                       alg_data->mif.order--;
                }
-
-               /*
-                * Now we'll 'ask' for data:
-                * For each byte we want to receive, we must
-                * write a (dummy) byte to the Tx-FIFO.
-                */
-               iowrite32(val, I2C_REG_TX(alg_data));
-
                return 0;
        }
 
@@ -514,6 +521,7 @@ i2c_pnx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
 
                alg_data->mif.buf = pmsg->buf;
                alg_data->mif.len = pmsg->len;
+               alg_data->mif.order = pmsg->len;
                alg_data->mif.mode = (pmsg->flags & I2C_M_RD) ?
                        I2C_SMBUS_READ : I2C_SMBUS_WRITE;
                alg_data->mif.ret = 0;
@@ -566,6 +574,7 @@ i2c_pnx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
        /* Cleanup to be sure... */
        alg_data->mif.buf = NULL;
        alg_data->mif.len = 0;
+       alg_data->mif.order = 0;
 
        dev_dbg(&alg_data->adapter.dev, "%s(): exiting, stat = %x\n",
                __func__, ioread32(I2C_REG_STS(alg_data)));
index 2efa56c5ff2c32d10ff3018def5bc077b8492e4e..2091ae8f539a5e78ee59338cd29876109b22883d 100644 (file)
@@ -636,6 +636,22 @@ static void i2c_adapter_dev_release(struct device *dev)
        complete(&adap->dev_released);
 }
 
+/*
+ * This function is only needed for mutex_lock_nested, so it is never
+ * called unless locking correctness checking is enabled. Thus we
+ * make it inline to avoid a compiler warning. That's what gcc ends up
+ * doing anyway.
+ */
+static inline unsigned int i2c_adapter_depth(struct i2c_adapter *adapter)
+{
+       unsigned int depth = 0;
+
+       while ((adapter = i2c_parent_is_i2c_adapter(adapter)))
+               depth++;
+
+       return depth;
+}
+
 /*
  * Let users instantiate I2C devices through sysfs. This can be used when
  * platform initialization code doesn't contain the proper data for
@@ -726,7 +742,8 @@ i2c_sysfs_delete_device(struct device *dev, struct device_attribute *attr,
 
        /* Make sure the device was added through sysfs */
        res = -ENOENT;
-       mutex_lock(&adap->userspace_clients_lock);
+       mutex_lock_nested(&adap->userspace_clients_lock,
+                         i2c_adapter_depth(adap));
        list_for_each_entry_safe(client, next, &adap->userspace_clients,
                                 detected) {
                if (client->addr == addr) {
@@ -1073,7 +1090,8 @@ int i2c_del_adapter(struct i2c_adapter *adap)
                return res;
 
        /* Remove devices instantiated from sysfs */
-       mutex_lock(&adap->userspace_clients_lock);
+       mutex_lock_nested(&adap->userspace_clients_lock,
+                         i2c_adapter_depth(adap));
        list_for_each_entry_safe(client, next, &adap->userspace_clients,
                                 detected) {
                dev_dbg(&adap->dev, "Removing %s at 0x%x\n", client->name,
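
Editor's note: the i2c_adapter_depth() helper above exists to feed mutex_lock_nested(). When i2c muxes stack adapters, each level holds a lock of the same lock class, and without a per-level lockdep subclass the legitimate nesting would be reported as a deadlock. A generic sketch of that pattern, assuming nothing beyond <linux/mutex.h>:

    #include <linux/mutex.h>

    struct node {                           /* stand-in for a nested adapter */
            struct node *parent;
            struct mutex lock;
    };

    static unsigned int node_depth(struct node *n)
    {
            unsigned int depth = 0;

            while ((n = n->parent))
                    depth++;

            return depth;
    }

    static void node_lock(struct node *n)
    {
            /* same lock class at every level; the subclass keeps lockdep happy */
            mutex_lock_nested(&n->lock, node_depth(n));
    }
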
index f61780a02374d1f855af861092c3ebae5b35cd80..3bd5540238a7e6d683fd903cfacc14c8b05b92d5 100644 (file)
@@ -617,7 +617,7 @@ static int __devinit at91_adc_probe(struct platform_device *pdev)
        st->adc_clk = clk_get(&pdev->dev, "adc_op_clk");
        if (IS_ERR(st->adc_clk)) {
                dev_err(&pdev->dev, "Failed to get the ADC clock.\n");
-               ret = PTR_ERR(st->clk);
+               ret = PTR_ERR(st->adc_clk);
                goto error_disable_clk;
        }
 
index ff4c0a87a25f9804441ac90561e7c97e24610e85..ce68e361558c650ae8eb2032798048c8030cc309 100644 (file)
@@ -358,6 +358,7 @@ static void imx_keypad_inhibit(struct imx_keypad *keypad)
        /* Inhibit KDI and KRI interrupts. */
        reg_val = readw(keypad->mmio_base + KPSR);
        reg_val &= ~(KBD_STAT_KRIE | KBD_STAT_KDIE);
+       reg_val |= KBD_STAT_KPKR | KBD_STAT_KPKD;
        writew(reg_val, keypad->mmio_base + KPSR);
 
        /* Colums as open drain and disable all rows */
@@ -515,7 +516,9 @@ static int __devinit imx_keypad_probe(struct platform_device *pdev)
        input_set_drvdata(input_dev, keypad);
 
        /* Ensure that the keypad will stay dormant until opened */
+       clk_enable(keypad->clk);
        imx_keypad_inhibit(keypad);
+       clk_disable(keypad->clk);
 
        error = request_irq(irq, imx_keypad_irq_handler, 0,
                            pdev->name, keypad);
index 5ec774d6c82b4be13f06c97efffe7cc42860ebab..6918773ce02443e67181c62823023597e3ba44ce 100644 (file)
@@ -176,6 +176,20 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "Spring Peak"),
                },
        },
+       {
+               /* Gigabyte T1005 - defines wrong chassis type ("Other") */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "T1005"),
+               },
+       },
+       {
+               /* Gigabyte T1005M/P - defines wrong chassis type ("Other") */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "T1005M/P"),
+               },
+       },
        {
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
index 002041975de9c902f193f2448865d051f0ce98dc..532d067a9e07a0745188eb3dab8e4025ef7b664f 100644 (file)
@@ -1848,7 +1848,10 @@ static const struct wacom_features wacom_features_0x2A =
        { "Wacom Intuos5 M", WACOM_PKGLEN_INTUOS,  44704, 27940, 2047,
          63, INTUOS5, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
 static const struct wacom_features wacom_features_0xF4 =
-       { "Wacom Cintiq 24HD",    WACOM_PKGLEN_INTUOS,   104480, 65600, 2047,
+       { "Wacom Cintiq 24HD",       WACOM_PKGLEN_INTUOS,   104480, 65600, 2047,
+         63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+static const struct wacom_features wacom_features_0xF8 =
+       { "Wacom Cintiq 24HD touch", WACOM_PKGLEN_INTUOS,   104480, 65600, 2047,
          63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
 static const struct wacom_features wacom_features_0x3F =
        { "Wacom Cintiq 21UX",    WACOM_PKGLEN_INTUOS,    87200, 65600, 1023,
@@ -2091,6 +2094,7 @@ const struct usb_device_id wacom_ids[] = {
        { USB_DEVICE_WACOM(0xEF) },
        { USB_DEVICE_WACOM(0x47) },
        { USB_DEVICE_WACOM(0xF4) },
+       { USB_DEVICE_WACOM(0xF8) },
        { USB_DEVICE_WACOM(0xFA) },
        { USB_DEVICE_LENOVO(0x6004) },
        { }
index 9afc777a40a7077a5bbd667944f1c1ee3bac38d7..b06a5e3a665ea1864c8824f6d7d172f87d0b3ac1 100644 (file)
@@ -602,6 +602,7 @@ edt_ft5x06_ts_teardown_debugfs(struct edt_ft5x06_ts_data *tsdata)
 {
        if (tsdata->debug_dir)
                debugfs_remove_recursive(tsdata->debug_dir);
+       kfree(tsdata->raw_buffer);
 }
 
 #else
@@ -843,7 +844,6 @@ static int __devexit edt_ft5x06_ts_remove(struct i2c_client *client)
        if (gpio_is_valid(pdata->reset_pin))
                gpio_free(pdata->reset_pin);
 
-       kfree(tsdata->raw_buffer);
        kfree(tsdata);
 
        return 0;
index fa6ca473372539fda128a7a1ec46d9c4d13c3c4a..dceaec821b0e5324cbfa2d92bdb7334845bf9262 100644 (file)
@@ -857,8 +857,9 @@ avm_bctrl(struct mISDNchannel *ch, u32 cmd, void *arg)
        switch (cmd) {
        case CLOSE_CHANNEL:
                test_and_clear_bit(FLG_OPEN, &bch->Flags);
+               cancel_work_sync(&bch->workq);
                spin_lock_irqsave(&fc->lock, flags);
-               mISDN_freebchannel(bch);
+               mISDN_clear_bchannel(bch);
                modehdlc(bch, ISDN_P_NONE);
                spin_unlock_irqrestore(&fc->lock, flags);
                ch->protocol = ISDN_P_NONE;
index 5e402cf2e79506b82288140334da60eb4e923e6d..f02794203bb193b41291efc3bc6d8457b2043883 100644 (file)
@@ -5059,6 +5059,7 @@ hfcmulti_init(struct hm_map *m, struct pci_dev *pdev,
                                printk(KERN_INFO
                                       "HFC-E1 #%d has overlapping B-channels on fragment #%d\n",
                                       E1_cnt + 1, pt);
+                               kfree(hc);
                                return -EINVAL;
                        }
                        maskcheck |= hc->bmask[pt];
@@ -5086,6 +5087,7 @@ hfcmulti_init(struct hm_map *m, struct pci_dev *pdev,
        if ((poll >> 1) > sizeof(hc->silence_data)) {
                printk(KERN_ERR "HFCMULTI error: silence_data too small, "
                       "please fix\n");
+               kfree(hc);
                return -EINVAL;
        }
        for (i = 0; i < (poll >> 1); i++)
index 752e0825591fbed9e820044495d42b9a80842320..ccd7d851be26d27913a26656cdef1fc838c90870 100644 (file)
@@ -1406,8 +1406,9 @@ hscx_bctrl(struct mISDNchannel *ch, u32 cmd, void *arg)
        switch (cmd) {
        case CLOSE_CHANNEL:
                test_and_clear_bit(FLG_OPEN, &bch->Flags);
+               cancel_work_sync(&bch->workq);
                spin_lock_irqsave(hx->ip->hwlock, flags);
-               mISDN_freebchannel(bch);
+               mISDN_clear_bchannel(bch);
                hscx_mode(hx, ISDN_P_NONE);
                spin_unlock_irqrestore(hx->ip->hwlock, flags);
                ch->protocol = ISDN_P_NONE;
index be5973ded6d6e4288fe8fdebd0967150228ff937..182ecf0626c2098e3c38c4da0eeea54a1197ce7d 100644 (file)
@@ -1588,8 +1588,9 @@ isar_bctrl(struct mISDNchannel *ch, u32 cmd, void *arg)
        switch (cmd) {
        case CLOSE_CHANNEL:
                test_and_clear_bit(FLG_OPEN, &bch->Flags);
+               cancel_work_sync(&bch->workq);
                spin_lock_irqsave(ich->is->hwlock, flags);
-               mISDN_freebchannel(bch);
+               mISDN_clear_bchannel(bch);
                modeisar(ich, ISDN_P_NONE);
                spin_unlock_irqrestore(ich->is->hwlock, flags);
                ch->protocol = ISDN_P_NONE;
index c3e3e76862731496b6bea5d35b3ca8ef5662e486..9bcade59eb73bdf24f72e8fa5a6e08e4be641f1c 100644 (file)
@@ -812,8 +812,9 @@ nj_bctrl(struct mISDNchannel *ch, u32 cmd, void *arg)
        switch (cmd) {
        case CLOSE_CHANNEL:
                test_and_clear_bit(FLG_OPEN, &bch->Flags);
+               cancel_work_sync(&bch->workq);
                spin_lock_irqsave(&card->lock, flags);
-               mISDN_freebchannel(bch);
+               mISDN_clear_bchannel(bch);
                mode_tiger(bc, ISDN_P_NONE);
                spin_unlock_irqrestore(&card->lock, flags);
                ch->protocol = ISDN_P_NONE;
index 26a86b8460992e5e98c722f8b6487fb8ff1fe932..335fe6455002c708cfb0be66318b2473f4cdfcc7 100644 (file)
@@ -1054,8 +1054,9 @@ w6692_bctrl(struct mISDNchannel *ch, u32 cmd, void *arg)
        switch (cmd) {
        case CLOSE_CHANNEL:
                test_and_clear_bit(FLG_OPEN, &bch->Flags);
+               cancel_work_sync(&bch->workq);
                spin_lock_irqsave(&card->lock, flags);
-               mISDN_freebchannel(bch);
+               mISDN_clear_bchannel(bch);
                w6692_mode(bc, ISDN_P_NONE);
                spin_unlock_irqrestore(&card->lock, flags);
                ch->protocol = ISDN_P_NONE;
index ef34fd40867cb6b7f5b767542c4da2e173d6b9cc..2602be23f341287468524c9fbd79e33a18e21aa6 100644 (file)
@@ -148,17 +148,16 @@ mISDN_clear_bchannel(struct bchannel *ch)
        ch->next_minlen = ch->init_minlen;
        ch->maxlen = ch->init_maxlen;
        ch->next_maxlen = ch->init_maxlen;
+       skb_queue_purge(&ch->rqueue);
+       ch->rcount = 0;
 }
 EXPORT_SYMBOL(mISDN_clear_bchannel);
 
-int
+void
 mISDN_freebchannel(struct bchannel *ch)
 {
+       cancel_work_sync(&ch->workq);
        mISDN_clear_bchannel(ch);
-       skb_queue_purge(&ch->rqueue);
-       ch->rcount = 0;
-       flush_work_sync(&ch->workq);
-       return 0;
 }
 EXPORT_SYMBOL(mISDN_freebchannel);
 
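
Editor's note: mISDN_freebchannel() now cancels the work item before clearing the channel, and the driver hunks above call cancel_work_sync() themselves before taking their spinlock, because cancel_work_sync() can sleep and so must not run with a spinlock held. A sketch of the resulting close path (hypothetical helper, assuming the declarations in <linux/mISDNhw.h>):

    #include <linux/mISDNhw.h>
    #include <linux/spinlock.h>
    #include <linux/workqueue.h>

    /* Sketch only: sleepable cancel first, then the non-sleeping cleanup */
    static void close_bchannel(struct bchannel *bch, spinlock_t *hwlock)
    {
            unsigned long flags;

            cancel_work_sync(&bch->workq);          /* may sleep: outside the lock */

            spin_lock_irqsave(hwlock, flags);
            mISDN_clear_bchannel(bch);              /* purges rqueue, resets state */
            spin_unlock_irqrestore(hwlock, flags);
    }
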
index b67a3018b13645f2e15f9c773f92d6f9e5acba06..ce229ea933d1388467aed017b9f65d4a2f9ef3a1 100644 (file)
@@ -470,7 +470,8 @@ static int __devinit device_800_init(struct pm80x_chip *chip,
 
        ret =
            mfd_add_devices(chip->dev, 0, &onkey_devs[0],
-                           ARRAY_SIZE(onkey_devs), &onkey_resources[0], 0);
+                           ARRAY_SIZE(onkey_devs), &onkey_resources[0], 0,
+                           NULL);
        if (ret < 0) {
                dev_err(chip->dev, "Failed to add onkey subdev\n");
                goto out_dev;
@@ -481,7 +482,7 @@ static int __devinit device_800_init(struct pm80x_chip *chip,
                rtc_devs[0].platform_data = pdata->rtc;
                rtc_devs[0].pdata_size = sizeof(struct pm80x_rtc_pdata);
                ret = mfd_add_devices(chip->dev, 0, &rtc_devs[0],
-                                     ARRAY_SIZE(rtc_devs), NULL, 0);
+                                     ARRAY_SIZE(rtc_devs), NULL, 0, NULL);
                if (ret < 0) {
                        dev_err(chip->dev, "Failed to add rtc subdev\n");
                        goto out_dev;
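
Editor's note: this and the following MFD hunks all grow a trailing NULL argument because mfd_add_devices() gained a struct irq_domain * parameter (see the mfd-core hunk further below). Callers that do not route their cell interrupts through a domain simply pass NULL. A hedged sketch of a call against the new signature:

    #include <linux/mfd/core.h>

    /* Sketch only: hypothetical wrapper using the extended signature */
    static int add_cells(struct device *parent, struct mfd_cell *cells, int n)
    {
            return mfd_add_devices(parent, 0, cells, n,
                                   NULL,    /* mem_base */
                                   0,       /* irq_base */
                                   NULL);   /* irq_domain: none */
    }
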
index 6146583589f61b53af6918db338861f116596a5b..c20a31136f045ccd57c0c19ec62ceb6b229bc861 100644 (file)
@@ -216,7 +216,8 @@ static int __devinit device_805_init(struct pm80x_chip *chip)
        }
 
        ret = mfd_add_devices(chip->dev, 0, &codec_devs[0],
-                             ARRAY_SIZE(codec_devs), &codec_resources[0], 0);
+                             ARRAY_SIZE(codec_devs), &codec_resources[0], 0,
+                             NULL);
        if (ret < 0) {
                dev_err(chip->dev, "Failed to add codec subdev\n");
                goto out_codec;
index d09918cf1b1556a74edb622e0174d3ceffdf7ca6..b73f033b2c602fadce09dd97d0c7623581962e12 100644 (file)
@@ -637,7 +637,7 @@ static void __devinit device_bk_init(struct pm860x_chip *chip,
                        bk_devs[i].resources = &bk_resources[j];
                        ret = mfd_add_devices(chip->dev, 0,
                                              &bk_devs[i], 1,
-                                             &bk_resources[j], 0);
+                                             &bk_resources[j], 0, NULL);
                        if (ret < 0) {
                                dev_err(chip->dev, "Failed to add "
                                        "backlight subdev\n");
@@ -672,7 +672,7 @@ static void __devinit device_led_init(struct pm860x_chip *chip,
                        led_devs[i].resources = &led_resources[j],
                        ret = mfd_add_devices(chip->dev, 0,
                                              &led_devs[i], 1,
-                                             &led_resources[j], 0);
+                                             &led_resources[j], 0, NULL);
                        if (ret < 0) {
                                dev_err(chip->dev, "Failed to add "
                                        "led subdev\n");
@@ -709,7 +709,7 @@ static void __devinit device_regulator_init(struct pm860x_chip *chip,
                regulator_devs[i].resources = &regulator_resources[seq];
 
                ret = mfd_add_devices(chip->dev, 0, &regulator_devs[i], 1,
-                                     &regulator_resources[seq], 0);
+                                     &regulator_resources[seq], 0, NULL);
                if (ret < 0) {
                        dev_err(chip->dev, "Failed to add regulator subdev\n");
                        goto out;
@@ -733,7 +733,7 @@ static void __devinit device_rtc_init(struct pm860x_chip *chip,
        rtc_devs[0].resources = &rtc_resources[0];
        ret = mfd_add_devices(chip->dev, 0, &rtc_devs[0],
                              ARRAY_SIZE(rtc_devs), &rtc_resources[0],
-                             chip->irq_base);
+                             chip->irq_base, NULL);
        if (ret < 0)
                dev_err(chip->dev, "Failed to add rtc subdev\n");
 }
@@ -752,7 +752,7 @@ static void __devinit device_touch_init(struct pm860x_chip *chip,
        touch_devs[0].resources = &touch_resources[0];
        ret = mfd_add_devices(chip->dev, 0, &touch_devs[0],
                              ARRAY_SIZE(touch_devs), &touch_resources[0],
-                             chip->irq_base);
+                             chip->irq_base, NULL);
        if (ret < 0)
                dev_err(chip->dev, "Failed to add touch subdev\n");
 }
@@ -770,7 +770,7 @@ static void __devinit device_power_init(struct pm860x_chip *chip,
        power_devs[0].num_resources = ARRAY_SIZE(battery_resources);
        power_devs[0].resources = &battery_resources[0],
        ret = mfd_add_devices(chip->dev, 0, &power_devs[0], 1,
-                             &battery_resources[0], chip->irq_base);
+                             &battery_resources[0], chip->irq_base, NULL);
        if (ret < 0)
                dev_err(chip->dev, "Failed to add battery subdev\n");
 
@@ -779,7 +779,7 @@ static void __devinit device_power_init(struct pm860x_chip *chip,
        power_devs[1].num_resources = ARRAY_SIZE(charger_resources);
        power_devs[1].resources = &charger_resources[0],
        ret = mfd_add_devices(chip->dev, 0, &power_devs[1], 1,
-                             &charger_resources[0], chip->irq_base);
+                             &charger_resources[0], chip->irq_base, NULL);
        if (ret < 0)
                dev_err(chip->dev, "Failed to add charger subdev\n");
 
@@ -788,7 +788,7 @@ static void __devinit device_power_init(struct pm860x_chip *chip,
        power_devs[2].num_resources = ARRAY_SIZE(preg_resources);
        power_devs[2].resources = &preg_resources[0],
        ret = mfd_add_devices(chip->dev, 0, &power_devs[2], 1,
-                             &preg_resources[0], chip->irq_base);
+                             &preg_resources[0], chip->irq_base, NULL);
        if (ret < 0)
                dev_err(chip->dev, "Failed to add preg subdev\n");
 }
@@ -802,7 +802,7 @@ static void __devinit device_onkey_init(struct pm860x_chip *chip,
        onkey_devs[0].resources = &onkey_resources[0],
        ret = mfd_add_devices(chip->dev, 0, &onkey_devs[0],
                              ARRAY_SIZE(onkey_devs), &onkey_resources[0],
-                             chip->irq_base);
+                             chip->irq_base, NULL);
        if (ret < 0)
                dev_err(chip->dev, "Failed to add onkey subdev\n");
 }
@@ -815,7 +815,8 @@ static void __devinit device_codec_init(struct pm860x_chip *chip,
        codec_devs[0].num_resources = ARRAY_SIZE(codec_resources);
        codec_devs[0].resources = &codec_resources[0],
        ret = mfd_add_devices(chip->dev, 0, &codec_devs[0],
-                             ARRAY_SIZE(codec_devs), &codec_resources[0], 0);
+                             ARRAY_SIZE(codec_devs), &codec_resources[0], 0,
+                             NULL);
        if (ret < 0)
                dev_err(chip->dev, "Failed to add codec subdev\n");
 }
index 44a3fdbadef40df1e09b12884f44caa46b76f0ac..f1beb4971f87f580090df39b38dbdc5e9937140e 100644 (file)
@@ -424,7 +424,7 @@ static int aat2870_i2c_probe(struct i2c_client *client,
        }
 
        ret = mfd_add_devices(aat2870->dev, 0, aat2870_devs,
-                             ARRAY_SIZE(aat2870_devs), NULL, 0);
+                             ARRAY_SIZE(aat2870_devs), NULL, 0, NULL);
        if (ret != 0) {
                dev_err(aat2870->dev, "Failed to add subdev: %d\n", ret);
                goto out_disable;
index 78fca2902c8da38fd07660e381e8ed55c0d78c2d..01781ae5d0d7f3de38c811dad727ecc179c94be7 100644 (file)
@@ -946,7 +946,7 @@ static int __devinit ab3100_probe(struct i2c_client *client,
        }
 
        err = mfd_add_devices(&client->dev, 0, ab3100_devs,
-               ARRAY_SIZE(ab3100_devs), NULL, 0);
+                             ARRAY_SIZE(ab3100_devs), NULL, 0, NULL);
 
        ab3100_setup_debugfs(ab3100);
 
index 626b4ecaf64761fdd3cd43ac3f02986d6830e467..47adf800024e01f8cdf05856eb3b356226c527f3 100644 (file)
@@ -1418,25 +1418,25 @@ static int __devinit ab8500_probe(struct platform_device *pdev)
 
        ret = mfd_add_devices(ab8500->dev, 0, abx500_common_devs,
                        ARRAY_SIZE(abx500_common_devs), NULL,
-                       ab8500->irq_base);
+                       ab8500->irq_base, ab8500->domain);
        if (ret)
                goto out_freeirq;
 
        if (is_ab9540(ab8500))
                ret = mfd_add_devices(ab8500->dev, 0, ab9540_devs,
                                ARRAY_SIZE(ab9540_devs), NULL,
-                               ab8500->irq_base);
+                               ab8500->irq_base, ab8500->domain);
        else
                ret = mfd_add_devices(ab8500->dev, 0, ab8500_devs,
                                ARRAY_SIZE(ab8500_devs), NULL,
-                               ab8500->irq_base);
+                               ab8500->irq_base, ab8500->domain);
        if (ret)
                goto out_freeirq;
 
        if (is_ab9540(ab8500) || is_ab8505(ab8500))
                ret = mfd_add_devices(ab8500->dev, 0, ab9540_ab8505_devs,
                                ARRAY_SIZE(ab9540_ab8505_devs), NULL,
-                               ab8500->irq_base);
+                               ab8500->irq_base, ab8500->domain);
        if (ret)
                goto out_freeirq;
 
@@ -1444,7 +1444,7 @@ static int __devinit ab8500_probe(struct platform_device *pdev)
                /* Add battery management devices */
                ret = mfd_add_devices(ab8500->dev, 0, ab8500_bm_devs,
                                      ARRAY_SIZE(ab8500_bm_devs), NULL,
-                                     ab8500->irq_base);
+                                     ab8500->irq_base, ab8500->domain);
                if (ret)
                        dev_err(ab8500->dev, "error adding bm devices\n");
        }
index c7983e862549a0b79775cae3e76c7cf0c867ad83..1b48f2094806c75fa8914657978f1b7bf814ae93 100644 (file)
@@ -316,7 +316,7 @@ int __devinit arizona_dev_init(struct arizona *arizona)
        }
 
        ret = mfd_add_devices(arizona->dev, -1, early_devs,
-                             ARRAY_SIZE(early_devs), NULL, 0);
+                             ARRAY_SIZE(early_devs), NULL, 0, NULL);
        if (ret != 0) {
                dev_err(dev, "Failed to add early children: %d\n", ret);
                return ret;
@@ -516,11 +516,11 @@ int __devinit arizona_dev_init(struct arizona *arizona)
        switch (arizona->type) {
        case WM5102:
                ret = mfd_add_devices(arizona->dev, -1, wm5102_devs,
-                                     ARRAY_SIZE(wm5102_devs), NULL, 0);
+                                     ARRAY_SIZE(wm5102_devs), NULL, 0, NULL);
                break;
        case WM5110:
                ret = mfd_add_devices(arizona->dev, -1, wm5110_devs,
-                                     ARRAY_SIZE(wm5102_devs), NULL, 0);
+                                     ARRAY_SIZE(wm5102_devs), NULL, 0, NULL);
                break;
        }
 
index 683e18a23329802875d03f92d53e354a6474ad9d..62f0883a7630c360ab9e52f9fa11f306d1a3efee 100644 (file)
@@ -913,14 +913,14 @@ static int __init asic3_mfd_probe(struct platform_device *pdev,
        if (pdata->clock_rate) {
                ds1wm_pdata.clock_rate = pdata->clock_rate;
                ret = mfd_add_devices(&pdev->dev, pdev->id,
-                       &asic3_cell_ds1wm, 1, mem, asic->irq_base);
+                       &asic3_cell_ds1wm, 1, mem, asic->irq_base, NULL);
                if (ret < 0)
                        goto out;
        }
 
        if (mem_sdio && (irq >= 0)) {
                ret = mfd_add_devices(&pdev->dev, pdev->id,
-                       &asic3_cell_mmc, 1, mem_sdio, irq);
+                       &asic3_cell_mmc, 1, mem_sdio, irq, NULL);
                if (ret < 0)
                        goto out;
        }
@@ -934,7 +934,7 @@ static int __init asic3_mfd_probe(struct platform_device *pdev,
                        asic3_cell_leds[i].pdata_size = sizeof(pdata->leds[i]);
                }
                ret = mfd_add_devices(&pdev->dev, 0,
-                       asic3_cell_leds, ASIC3_NUM_LEDS, NULL, 0);
+                       asic3_cell_leds, ASIC3_NUM_LEDS, NULL, 0, NULL);
        }
 
  out:
index 3419e726de478cb330801d1dfb1db42d2a5d1748..2b282133c725b1b6fbb9c0f10442760640bfcd47 100644 (file)
@@ -149,7 +149,7 @@ static int __devinit cs5535_mfd_probe(struct pci_dev *pdev,
        }
 
        err = mfd_add_devices(&pdev->dev, -1, cs5535_mfd_cells,
-                       ARRAY_SIZE(cs5535_mfd_cells), NULL, 0);
+                             ARRAY_SIZE(cs5535_mfd_cells), NULL, 0, NULL);
        if (err) {
                dev_err(&pdev->dev, "MFD add devices failed: %d\n", err);
                goto err_disable;
index 2544910e1fd604f5f6184009a1a208fa5f838cf2..a0a62b24621b831cb0c7c43e62692bd061eb7163 100644 (file)
@@ -803,7 +803,7 @@ int __devinit da9052_device_init(struct da9052 *da9052, u8 chip_id)
                dev_err(da9052->dev, "DA9052 ADC IRQ failed ret=%d\n", ret);
 
        ret = mfd_add_devices(da9052->dev, -1, da9052_subdev_info,
-                             ARRAY_SIZE(da9052_subdev_info), NULL, 0);
+                             ARRAY_SIZE(da9052_subdev_info), NULL, 0, NULL);
        if (ret)
                goto err;
 
index 4e2af2cb2d26a76534c884c3cc57fe97f3c30d52..45e83a68641b81d0a5f7605b19027e2cd5b1aec0 100644 (file)
@@ -129,7 +129,7 @@ static int __init davinci_vc_probe(struct platform_device *pdev)
        cell->pdata_size = sizeof(*davinci_vc);
 
        ret = mfd_add_devices(&pdev->dev, pdev->id, davinci_vc->cells,
-                             DAVINCI_VC_CELLS, NULL, 0);
+                             DAVINCI_VC_CELLS, NULL, 0, NULL);
        if (ret != 0) {
                dev_err(&pdev->dev, "fail to register client devices\n");
                goto fail4;
index 7040a0081130c93ce6b73145355abec0a8c571b8..0e63cdd9b52abc44666c57994623e120f11644b2 100644 (file)
@@ -3010,7 +3010,7 @@ static int __devinit db8500_prcmu_probe(struct platform_device *pdev)
                prcmu_config_esram0_deep_sleep(ESRAM0_DEEP_SLEEP_STATE_RET);
 
        err = mfd_add_devices(&pdev->dev, 0, db8500_prcmu_devs,
-                       ARRAY_SIZE(db8500_prcmu_devs), NULL, 0);
+                             ARRAY_SIZE(db8500_prcmu_devs), NULL, 0, NULL);
        if (err) {
                pr_err("prcmu: Failed to add subdevices\n");
                return err;
index 04c7093d6499cb88f330b2f768713e49e58c9ffc..9e5453d21a6806263d17bc6ada0c07a3a2a7013a 100644 (file)
@@ -168,7 +168,7 @@ static int __init pasic3_probe(struct platform_device *pdev)
                /* the first 5 PASIC3 registers control the DS1WM */
                ds1wm_resources[0].end = (5 << asic->bus_shift) - 1;
                ret = mfd_add_devices(&pdev->dev, pdev->id,
-                               &ds1wm_cell, 1, r, irq);
+                                     &ds1wm_cell, 1, r, irq, NULL);
                if (ret < 0)
                        dev_warn(dev, "failed to register DS1WM\n");
        }
@@ -176,7 +176,8 @@ static int __init pasic3_probe(struct platform_device *pdev)
        if (pdata && pdata->led_pdata) {
                led_cell.platform_data = pdata->led_pdata;
                led_cell.pdata_size = sizeof(struct pasic3_leds_machinfo);
-               ret = mfd_add_devices(&pdev->dev, pdev->id, &led_cell, 1, r, 0);
+               ret = mfd_add_devices(&pdev->dev, pdev->id, &led_cell, 1, r,
+                                     0, NULL);
                if (ret < 0)
                        dev_warn(dev, "failed to register LED device\n");
        }
index 59df5584cb58f54a25a424ca2551417a231fcb4d..266bdc5bd96d17ea1bc05967d911aa4b9b7fc8bb 100644 (file)
@@ -344,13 +344,13 @@ static int __devinit intel_msic_init_devices(struct intel_msic *msic)
                        continue;
 
                ret = mfd_add_devices(&pdev->dev, -1, &msic_devs[i], 1, NULL,
-                                     pdata->irq[i]);
+                                     pdata->irq[i], NULL);
                if (ret)
                        goto fail;
        }
 
        ret = mfd_add_devices(&pdev->dev, 0, msic_other_devs,
-                             ARRAY_SIZE(msic_other_devs), NULL, 0);
+                             ARRAY_SIZE(msic_other_devs), NULL, 0, NULL);
        if (ret)
                goto fail;
 
index 2ea99989551af85a4796c69e5fc2b65ba7a951e9..965c4801df8a1765e069ecb8707df451984b080a 100644 (file)
@@ -147,7 +147,7 @@ static int __devinit cmodio_probe_submodules(struct cmodio_device *priv)
        }
 
        return mfd_add_devices(&pdev->dev, 0, priv->cells,
-                              num_probed, NULL, pdev->irq);
+                              num_probed, NULL, pdev->irq, NULL);
 }
 
 /*
index 87662a17dec62d527a3af1b8b7fb8c782c85dd92..c6b6d7dda517528081a9e3f7e96d96458846ef67 100644 (file)
@@ -287,7 +287,8 @@ static int __devinit jz4740_adc_probe(struct platform_device *pdev)
        writeb(0xff, adc->base + JZ_REG_ADC_CTRL);
 
        ret = mfd_add_devices(&pdev->dev, 0, jz4740_adc_cells,
-               ARRAY_SIZE(jz4740_adc_cells), mem_base, irq_base);
+                             ARRAY_SIZE(jz4740_adc_cells), mem_base,
+                             irq_base, NULL);
        if (ret < 0)
                goto err_clk_put;
 
index 0b2879b87fd999f537bbfc652da9a29c90eb1042..24212f45b201458961373df48c08d9a1f7f9536d 100644 (file)
@@ -393,7 +393,8 @@ static int __devinit lm3533_device_als_init(struct lm3533 *lm3533)
        lm3533_als_devs[0].platform_data = pdata->als;
        lm3533_als_devs[0].pdata_size = sizeof(*pdata->als);
 
-       ret = mfd_add_devices(lm3533->dev, 0, lm3533_als_devs, 1, NULL, 0);
+       ret = mfd_add_devices(lm3533->dev, 0, lm3533_als_devs, 1, NULL,
+                             0, NULL);
        if (ret) {
                dev_err(lm3533->dev, "failed to add ALS device\n");
                return ret;
@@ -422,7 +423,7 @@ static int __devinit lm3533_device_bl_init(struct lm3533 *lm3533)
        }
 
        ret = mfd_add_devices(lm3533->dev, 0, lm3533_bl_devs,
-                                       pdata->num_backlights, NULL, 0);
+                             pdata->num_backlights, NULL, 0, NULL);
        if (ret) {
                dev_err(lm3533->dev, "failed to add backlight devices\n");
                return ret;
@@ -451,7 +452,7 @@ static int __devinit lm3533_device_led_init(struct lm3533 *lm3533)
        }
 
        ret = mfd_add_devices(lm3533->dev, 0, lm3533_led_devs,
-                                               pdata->num_leds, NULL, 0);
+                             pdata->num_leds, NULL, 0, NULL);
        if (ret) {
                dev_err(lm3533->dev, "failed to add LED devices\n");
                return ret;
index 027cc8f861324de8f10bc7e9a1aa66af9e6ef093..092ad4b44b6d67b9b4ee039fccc73da1748d3a2c 100644 (file)
@@ -750,7 +750,7 @@ gpe0_done:
 
        lpc_ich_finalize_cell(&lpc_ich_cells[LPC_GPIO], id);
        ret = mfd_add_devices(&dev->dev, -1, &lpc_ich_cells[LPC_GPIO],
-                               1, NULL, 0);
+                             1, NULL, 0, NULL);
 
 gpio_done:
        if (acpi_conflict)
@@ -765,7 +765,6 @@ static int __devinit lpc_ich_init_wdt(struct pci_dev *dev,
        u32 base_addr_cfg;
        u32 base_addr;
        int ret;
-       bool acpi_conflict = false;
        struct resource *res;
 
        /* Setup power management base register */
@@ -780,20 +779,11 @@ static int __devinit lpc_ich_init_wdt(struct pci_dev *dev,
        res = wdt_io_res(ICH_RES_IO_TCO);
        res->start = base_addr + ACPIBASE_TCO_OFF;
        res->end = base_addr + ACPIBASE_TCO_END;
-       ret = acpi_check_resource_conflict(res);
-       if (ret) {
-               acpi_conflict = true;
-               goto wdt_done;
-       }
 
        res = wdt_io_res(ICH_RES_IO_SMI);
        res->start = base_addr + ACPIBASE_SMI_OFF;
        res->end = base_addr + ACPIBASE_SMI_END;
-       ret = acpi_check_resource_conflict(res);
-       if (ret) {
-               acpi_conflict = true;
-               goto wdt_done;
-       }
+
        lpc_ich_enable_acpi_space(dev);
 
        /*
@@ -813,21 +803,13 @@ static int __devinit lpc_ich_init_wdt(struct pci_dev *dev,
                res = wdt_mem_res(ICH_RES_MEM_GCS);
                res->start = base_addr + ACPIBASE_GCS_OFF;
                res->end = base_addr + ACPIBASE_GCS_END;
-               ret = acpi_check_resource_conflict(res);
-               if (ret) {
-                       acpi_conflict = true;
-                       goto wdt_done;
-               }
        }
 
        lpc_ich_finalize_cell(&lpc_ich_cells[LPC_WDT], id);
        ret = mfd_add_devices(&dev->dev, -1, &lpc_ich_cells[LPC_WDT],
-                               1, NULL, 0);
+                             1, NULL, 0, NULL);
 
 wdt_done:
-       if (acpi_conflict)
-               pr_warn("Resource conflict(s) found affecting %s\n",
-                               lpc_ich_cells[LPC_WDT].name);
        return ret;
 }
 
index 9f20abc5e3937065238ff1f3240c27cde9cbb4f6..f6b9c5c96b24f7d68e80b9ca2db8f202c2e3dd91 100644 (file)
@@ -127,7 +127,8 @@ static int __devinit lpc_sch_probe(struct pci_dev *dev,
                lpc_sch_cells[i].id = id->device;
 
        ret = mfd_add_devices(&dev->dev, 0,
-                       lpc_sch_cells, ARRAY_SIZE(lpc_sch_cells), NULL, 0);
+                             lpc_sch_cells, ARRAY_SIZE(lpc_sch_cells), NULL,
+                             0, NULL);
        if (ret)
                goto out_dev;
 
@@ -153,7 +154,8 @@ static int __devinit lpc_sch_probe(struct pci_dev *dev,
                        tunnelcreek_cells[i].id = id->device;
 
                ret = mfd_add_devices(&dev->dev, 0, tunnelcreek_cells,
-                       ARRAY_SIZE(tunnelcreek_cells), NULL, 0);
+                                     ARRAY_SIZE(tunnelcreek_cells), NULL,
+                                     0, NULL);
        }
 
        return ret;
index c03e12b51924060704641c0152ad6e629d167a72..d9e24c849a00a3f21aad864442293aa2db0ca64d 100644 (file)
@@ -126,7 +126,7 @@ static int max77686_i2c_probe(struct i2c_client *i2c,
        max77686_irq_init(max77686);
 
        ret = mfd_add_devices(max77686->dev, -1, max77686_devs,
-                             ARRAY_SIZE(max77686_devs), NULL, 0);
+                             ARRAY_SIZE(max77686_devs), NULL, 0, NULL);
 
        if (ret < 0)
                goto err_mfd;
index 2b403569e0a6411a92c0e7cb66c7d703d60cc0cf..1029d018c73921828f34740b4034c0cd7df5c3bd 100644 (file)
@@ -137,6 +137,9 @@ static void max77693_irq_mask(struct irq_data *data)
        const struct max77693_irq_data *irq_data =
                                irq_to_max77693_irq(max77693, data->irq);
 
+       if (irq_data->group >= MAX77693_IRQ_GROUP_NR)
+               return;
+
        if (irq_data->group >= MUIC_INT1 && irq_data->group <= MUIC_INT3)
                max77693->irq_masks_cur[irq_data->group] &= ~irq_data->mask;
        else
@@ -149,6 +152,9 @@ static void max77693_irq_unmask(struct irq_data *data)
        const struct max77693_irq_data *irq_data =
            irq_to_max77693_irq(max77693, data->irq);
 
+       if (irq_data->group >= MAX77693_IRQ_GROUP_NR)
+               return;
+
        if (irq_data->group >= MUIC_INT1 && irq_data->group <= MUIC_INT3)
                max77693->irq_masks_cur[irq_data->group] |= irq_data->mask;
        else
@@ -200,7 +206,7 @@ static irqreturn_t max77693_irq_thread(int irq, void *data)
 
        if (irq_src & MAX77693_IRQSRC_MUIC)
                /* MUIC INT1 ~ INT3 */
-               max77693_bulk_read(max77693->regmap, MAX77693_MUIC_REG_INT1,
+               max77693_bulk_read(max77693->regmap_muic, MAX77693_MUIC_REG_INT1,
                        MAX77693_NUM_IRQ_MUIC_REGS, &irq_reg[MUIC_INT1]);
 
        /* Apply masking */
@@ -255,7 +261,8 @@ int max77693_irq_init(struct max77693_dev *max77693)
 {
        struct irq_domain *domain;
        int i;
-       int ret;
+       int ret = 0;
+       u8 intsrc_mask;
 
        mutex_init(&max77693->irqlock);
 
@@ -287,19 +294,38 @@ int max77693_irq_init(struct max77693_dev *max77693)
                                        &max77693_irq_domain_ops, max77693);
        if (!domain) {
                dev_err(max77693->dev, "could not create irq domain\n");
-               return -ENODEV;
+               ret = -ENODEV;
+               goto err_irq;
        }
        max77693->irq_domain = domain;
 
+       /* Unmask max77693 interrupt */
+       ret = max77693_read_reg(max77693->regmap,
+                       MAX77693_PMIC_REG_INTSRC_MASK, &intsrc_mask);
+       if (ret < 0) {
+               dev_err(max77693->dev, "fail to read PMIC register\n");
+               goto err_irq;
+       }
+
+       intsrc_mask &= ~(MAX77693_IRQSRC_CHG);
+       intsrc_mask &= ~(MAX77693_IRQSRC_FLASH);
+       intsrc_mask &= ~(MAX77693_IRQSRC_MUIC);
+       ret = max77693_write_reg(max77693->regmap,
+                       MAX77693_PMIC_REG_INTSRC_MASK, intsrc_mask);
+       if (ret < 0) {
+               dev_err(max77693->dev, "fail to write PMIC register\n");
+               goto err_irq;
+       }
+
        ret = request_threaded_irq(max77693->irq, NULL, max77693_irq_thread,
                                   IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
                                   "max77693-irq", max77693);
-
        if (ret)
                dev_err(max77693->dev, "Failed to request IRQ %d: %d\n",
                        max77693->irq, ret);
 
-       return 0;
+err_irq:
+       return ret;
 }
 
 void max77693_irq_exit(struct max77693_dev *max77693)
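
Editor's note: besides unmasking the top-level INTSRC bits so charger, flash and MUIC interrupts actually reach the shared handler, the hunk above adds bounds checks before indexing the per-group mask array. A sketch of that guard (names follow the hunk and the driver's private header; only the bounds check is the point here, the mask polarity depends on the group as in the driver):

    /* Sketch only: never index the mask array with an out-of-range group */
    static void max77693_irq_mask_checked(struct max77693_dev *max77693,
                                          const struct max77693_irq_data *irq_data)
    {
            if (irq_data->group >= MAX77693_IRQ_GROUP_NR)
                    return;

            max77693->irq_masks_cur[irq_data->group] |= irq_data->mask;
    }
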
index a1811cb50ec75fc7c1dffd02aca98f132d7e4810..cc5155e20494726c2ae6954e64128f61973ebafd 100644 (file)
@@ -152,6 +152,20 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
        max77693->haptic = i2c_new_dummy(i2c->adapter, I2C_ADDR_HAPTIC);
        i2c_set_clientdata(max77693->haptic, max77693);
 
+       /*
+        * Initialize the register map for the MUIC device here, because
+        * the max77693 IRQ code uses the regmap-muic instance before the
+        * max77693-muic driver's probe() is called.
+        */
+       max77693->regmap_muic = devm_regmap_init_i2c(max77693->muic,
+                                        &max77693_regmap_config);
+       if (IS_ERR(max77693->regmap_muic)) {
+               ret = PTR_ERR(max77693->regmap_muic);
+               dev_err(max77693->dev,
+                       "failed to allocate register map: %d\n", ret);
+               goto err_regmap;
+       }
+
        ret = max77693_irq_init(max77693);
        if (ret < 0)
                goto err_irq;
@@ -159,7 +173,7 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
        pm_runtime_set_active(max77693->dev);
 
        ret = mfd_add_devices(max77693->dev, -1, max77693_devs,
-                       ARRAY_SIZE(max77693_devs), NULL, 0);
+                             ARRAY_SIZE(max77693_devs), NULL, 0, NULL);
        if (ret < 0)
                goto err_mfd;
 
index 825a7f06d9ba5ade6281810bec19c209187561b2..ee53757beca7e8344c15a66bbe5d51bcce7b9da1 100644 (file)
@@ -598,7 +598,7 @@ int __devinit max8925_device_init(struct max8925_chip *chip,
 
        ret = mfd_add_devices(chip->dev, 0, &rtc_devs[0],
                              ARRAY_SIZE(rtc_devs),
-                             &rtc_resources[0], chip->irq_base);
+                             &rtc_resources[0], chip->irq_base, NULL);
        if (ret < 0) {
                dev_err(chip->dev, "Failed to add rtc subdev\n");
                goto out;
@@ -606,7 +606,7 @@ int __devinit max8925_device_init(struct max8925_chip *chip,
 
        ret = mfd_add_devices(chip->dev, 0, &onkey_devs[0],
                              ARRAY_SIZE(onkey_devs),
-                             &onkey_resources[0], 0);
+                             &onkey_resources[0], 0, NULL);
        if (ret < 0) {
                dev_err(chip->dev, "Failed to add onkey subdev\n");
                goto out_dev;
@@ -615,7 +615,7 @@ int __devinit max8925_device_init(struct max8925_chip *chip,
        if (pdata) {
                ret = mfd_add_devices(chip->dev, 0, &regulator_devs[0],
                                      ARRAY_SIZE(regulator_devs),
-                                     &regulator_resources[0], 0);
+                                     &regulator_resources[0], 0, NULL);
                if (ret < 0) {
                        dev_err(chip->dev, "Failed to add regulator subdev\n");
                        goto out_dev;
@@ -625,7 +625,7 @@ int __devinit max8925_device_init(struct max8925_chip *chip,
        if (pdata && pdata->backlight) {
                ret = mfd_add_devices(chip->dev, 0, &backlight_devs[0],
                                      ARRAY_SIZE(backlight_devs),
-                                     &backlight_resources[0], 0);
+                                     &backlight_resources[0], 0, NULL);
                if (ret < 0) {
                        dev_err(chip->dev, "Failed to add backlight subdev\n");
                        goto out_dev;
@@ -635,7 +635,7 @@ int __devinit max8925_device_init(struct max8925_chip *chip,
        if (pdata && pdata->power) {
                ret = mfd_add_devices(chip->dev, 0, &power_devs[0],
                                        ARRAY_SIZE(power_devs),
-                                       &power_supply_resources[0], 0);
+                                     &power_supply_resources[0], 0, NULL);
                if (ret < 0) {
                        dev_err(chip->dev, "Failed to add power supply "
                                "subdev\n");
@@ -646,7 +646,7 @@ int __devinit max8925_device_init(struct max8925_chip *chip,
        if (pdata && pdata->touch) {
                ret = mfd_add_devices(chip->dev, 0, &touch_devs[0],
                                      ARRAY_SIZE(touch_devs),
-                                     &touch_resources[0], 0);
+                                     &touch_resources[0], 0, NULL);
                if (ret < 0) {
                        dev_err(chip->dev, "Failed to add touch subdev\n");
                        goto out_dev;
index 10b629c245b6770d304dde9bf978891c2a5ac800..f123517065ec911ff6e4154aa25a3f8e1dd0bd98 100644 (file)
@@ -160,7 +160,7 @@ static int max8997_i2c_probe(struct i2c_client *i2c,
 
        mfd_add_devices(max8997->dev, -1, max8997_devs,
                        ARRAY_SIZE(max8997_devs),
-                       NULL, 0);
+                       NULL, 0, NULL);
 
        /*
         * TODO: enable others (flash, muic, rtc, battery, ...) and
index 6ef56d28c05686bf298f589cebf91c43fcc707b8..d7218cc90945a643ee03cc670852d42078131503 100644 (file)
@@ -161,13 +161,13 @@ static int max8998_i2c_probe(struct i2c_client *i2c,
        switch (id->driver_data) {
        case TYPE_LP3974:
                ret = mfd_add_devices(max8998->dev, -1,
-                               lp3974_devs, ARRAY_SIZE(lp3974_devs),
-                               NULL, 0);
+                                     lp3974_devs, ARRAY_SIZE(lp3974_devs),
+                                     NULL, 0, NULL);
                break;
        case TYPE_MAX8998:
                ret = mfd_add_devices(max8998->dev, -1,
-                               max8998_devs, ARRAY_SIZE(max8998_devs),
-                               NULL, 0);
+                                     max8998_devs, ARRAY_SIZE(max8998_devs),
+                                     NULL, 0, NULL);
                break;
        default:
                ret = -EINVAL;
index b801dc72f041a125fcf9a52e25e6d594ee052d92..1ec79b54bd2f12f304c57f92633cc8c125a0d389 100644 (file)
@@ -612,7 +612,7 @@ static int mc13xxx_add_subdevice_pdata(struct mc13xxx *mc13xxx,
        if (!cell.name)
                return -ENOMEM;
 
-       return mfd_add_devices(mc13xxx->dev, -1, &cell, 1, NULL, 0);
+       return mfd_add_devices(mc13xxx->dev, -1, &cell, 1, NULL, 0, NULL);
 }
 
 static int mc13xxx_add_subdevice(struct mc13xxx *mc13xxx, const char *format)
index 0c3a01cde2f7615960fb2c9cc20ba7489bf00fbd..f8b77711ad2da4c26acade369d0bc6443631427b 100644 (file)
@@ -74,12 +74,11 @@ static int mfd_platform_add_cell(struct platform_device *pdev,
 static int mfd_add_device(struct device *parent, int id,
                          const struct mfd_cell *cell,
                          struct resource *mem_base,
-                         int irq_base)
+                         int irq_base, struct irq_domain *domain)
 {
        struct resource *res;
        struct platform_device *pdev;
        struct device_node *np = NULL;
-       struct irq_domain *domain = NULL;
        int ret = -ENOMEM;
        int r;
 
@@ -97,7 +96,6 @@ static int mfd_add_device(struct device *parent, int id,
                for_each_child_of_node(parent->of_node, np) {
                        if (of_device_is_compatible(np, cell->of_compatible)) {
                                pdev->dev.of_node = np;
-                               domain = irq_find_host(parent->of_node);
                                break;
                        }
                }
@@ -177,7 +175,7 @@ fail_alloc:
 int mfd_add_devices(struct device *parent, int id,
                    struct mfd_cell *cells, int n_devs,
                    struct resource *mem_base,
-                   int irq_base)
+                   int irq_base, struct irq_domain *domain)
 {
        int i;
        int ret = 0;
@@ -191,7 +189,8 @@ int mfd_add_devices(struct device *parent, int id,
        for (i = 0; i < n_devs; i++) {
                atomic_set(&cnts[i], 0);
                cells[i].usage_count = &cnts[i];
-               ret = mfd_add_device(parent, id, cells + i, mem_base, irq_base);
+               ret = mfd_add_device(parent, id, cells + i, mem_base,
+                                    irq_base, domain);
                if (ret)
                        break;
        }
@@ -247,7 +246,8 @@ int mfd_clone_cell(const char *cell, const char **clones, size_t n_clones)
        for (i = 0; i < n_clones; i++) {
                cell_entry.name = clones[i];
                /* don't give up if a single call fails; just report error */
-               if (mfd_add_device(pdev->dev.parent, -1, &cell_entry, NULL, 0))
+               if (mfd_add_device(pdev->dev.parent, -1, &cell_entry, NULL, 0,
+                                  NULL))
                        dev_err(dev, "failed to create platform device '%s'\n",
                                        clones[i]);
        }
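
Editor's note: this is the core side of the signature change the earlier hunks adapt to. Instead of mfd_add_device() guessing the interrupt domain with irq_find_host() on the parent's of_node, the caller now passes the domain explicitly (ab8500 above hands in ab8500->domain; the other callers pass NULL). A hedged sketch of a caller that owns a domain (hypothetical chip structure):

    #include <linux/irqdomain.h>
    #include <linux/mfd/core.h>

    struct example_chip {                   /* hypothetical driver data */
            struct device *dev;
            struct irq_domain *domain;
            struct mfd_cell *cells;
            int n_cells;
            int irq_base;
    };

    /* Sketch only: hand the chip's own irq_domain to the MFD core */
    static int example_add_subdevices(struct example_chip *chip)
    {
            return mfd_add_devices(chip->dev, 0, chip->cells, chip->n_cells,
                                   NULL, chip->irq_base, chip->domain);
    }
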
index c4a69f193a1df1985abfbaeeffb8e39cda933493..a345f9bb7b4758765725cf2c056303950ecc1c02 100644 (file)
@@ -453,7 +453,8 @@ static int __devinit palmas_i2c_probe(struct i2c_client *i2c,
 
        ret = mfd_add_devices(palmas->dev, -1,
                              children, ARRAY_SIZE(palmas_children),
-                             NULL, regmap_irq_chip_get_base(palmas->irq_data));
+                             NULL, regmap_irq_chip_get_base(palmas->irq_data),
+                             NULL);
        kfree(children);
 
        if (ret < 0)
index cdc1df7fa0e94d10a26059c18dd347c045dcf2cd..3a8fa88567b18385461fbed0bbc9ff30e8d0fffc 100644 (file)
@@ -289,7 +289,7 @@ static int __devinit rc5t583_i2c_probe(struct i2c_client *i2c,
        }
 
        ret = mfd_add_devices(rc5t583->dev, -1, rc5t583_subdevs,
-                       ARRAY_SIZE(rc5t583_subdevs), NULL, 0);
+                             ARRAY_SIZE(rc5t583_subdevs), NULL, 0, NULL);
        if (ret) {
                dev_err(&i2c->dev, "add mfd devices failed: %d\n", ret);
                goto err_add_devs;
index 685d61e431adfa4f8b733a13bd5c93cfb71c9a44..0f70dce611605fb5e92419c5ac393c2079b74833 100644 (file)
@@ -87,7 +87,8 @@ static int __devinit rdc321x_sb_probe(struct pci_dev *pdev,
        rdc321x_wdt_pdata.sb_pdev = pdev;
 
        return mfd_add_devices(&pdev->dev, -1,
-               rdc321x_sb_cells, ARRAY_SIZE(rdc321x_sb_cells), NULL, 0);
+                              rdc321x_sb_cells, ARRAY_SIZE(rdc321x_sb_cells),
+                              NULL, 0, NULL);
 }
 
 static void __devexit rdc321x_sb_remove(struct pci_dev *pdev)
index 2988efde11ebc60b6a714044f79d6a7cd9a44dba..49d361a618d06f5f26d17c49a8c9d48e20547caf 100644 (file)
@@ -141,19 +141,19 @@ static int sec_pmic_probe(struct i2c_client *i2c,
        switch (sec_pmic->device_type) {
        case S5M8751X:
                ret = mfd_add_devices(sec_pmic->dev, -1, s5m8751_devs,
-                                       ARRAY_SIZE(s5m8751_devs), NULL, 0);
+                                     ARRAY_SIZE(s5m8751_devs), NULL, 0, NULL);
                break;
        case S5M8763X:
                ret = mfd_add_devices(sec_pmic->dev, -1, s5m8763_devs,
-                                       ARRAY_SIZE(s5m8763_devs), NULL, 0);
+                                     ARRAY_SIZE(s5m8763_devs), NULL, 0, NULL);
                break;
        case S5M8767X:
                ret = mfd_add_devices(sec_pmic->dev, -1, s5m8767_devs,
-                                       ARRAY_SIZE(s5m8767_devs), NULL, 0);
+                                     ARRAY_SIZE(s5m8767_devs), NULL, 0, NULL);
                break;
        case S2MPS11X:
                ret = mfd_add_devices(sec_pmic->dev, -1, s2mps11_devs,
-                                       ARRAY_SIZE(s2mps11_devs), NULL, 0);
+                                     ARRAY_SIZE(s2mps11_devs), NULL, 0, NULL);
                break;
        default:
                /* If this happens, the probe function has a problem */
index d31fed07aefbc51bfb620c492a2f05197886eb36..d35da6820beae8ee2c7e01e1faf51cc48c96972b 100644 (file)
@@ -407,7 +407,7 @@ static int __devinit sta2x11_mfd_probe(struct pci_dev *pdev,
                              sta2x11_mfd_bar0,
                              ARRAY_SIZE(sta2x11_mfd_bar0),
                              &pdev->resource[0],
-                             0);
+                             0, NULL);
        if (err) {
                dev_err(&pdev->dev, "mfd_add_devices[0] failed: %d\n", err);
                goto err_disable;
@@ -417,7 +417,7 @@ static int __devinit sta2x11_mfd_probe(struct pci_dev *pdev,
                              sta2x11_mfd_bar1,
                              ARRAY_SIZE(sta2x11_mfd_bar1),
                              &pdev->resource[1],
-                             0);
+                             0, NULL);
        if (err) {
                dev_err(&pdev->dev, "mfd_add_devices[1] failed: %d\n", err);
                goto err_disable;
index 2dd8d49cb30bc7d63d14250e673b2c12eb4cef3f..c94f521f392cb0b5214385ff57ac6e18e8762907 100644 (file)
@@ -962,7 +962,7 @@ static int __devinit stmpe_add_device(struct stmpe *stmpe,
                                      struct mfd_cell *cell, int irq)
 {
        return mfd_add_devices(stmpe->dev, stmpe->pdata->id, cell, 1,
-                              NULL, stmpe->irq_base + irq);
+                              NULL, stmpe->irq_base + irq, NULL);
 }
 
 static int __devinit stmpe_devices_init(struct stmpe *stmpe)
index 2d9e8799e733c6644c18aa9335e9796ef0c0709f..b32940ec903425d37010b79eed9a16e4b3c20761 100644 (file)
@@ -388,7 +388,7 @@ static int t7l66xb_probe(struct platform_device *dev)
 
        ret = mfd_add_devices(&dev->dev, dev->id,
                              t7l66xb_cells, ARRAY_SIZE(t7l66xb_cells),
-                             iomem, t7l66xb->irq_base);
+                             iomem, t7l66xb->irq_base, NULL);
 
        if (!ret)
                return 0;
index 048bf0532a095014e03358b01af1f4cd58585b97..b56ba6b43294b77e536b12247c0e8b1d18085701 100644 (file)
@@ -262,8 +262,8 @@ static int __devinit tc3589x_device_init(struct tc3589x *tc3589x)
 
        if (blocks & TC3589x_BLOCK_GPIO) {
                ret = mfd_add_devices(tc3589x->dev, -1, tc3589x_dev_gpio,
-                               ARRAY_SIZE(tc3589x_dev_gpio), NULL,
-                               tc3589x->irq_base);
+                                     ARRAY_SIZE(tc3589x_dev_gpio), NULL,
+                                     tc3589x->irq_base, NULL);
                if (ret) {
                        dev_err(tc3589x->dev, "failed to add gpio child\n");
                        return ret;
@@ -273,8 +273,8 @@ static int __devinit tc3589x_device_init(struct tc3589x *tc3589x)
 
        if (blocks & TC3589x_BLOCK_KEYPAD) {
                ret = mfd_add_devices(tc3589x->dev, -1, tc3589x_dev_keypad,
-                               ARRAY_SIZE(tc3589x_dev_keypad), NULL,
-                               tc3589x->irq_base);
+                                     ARRAY_SIZE(tc3589x_dev_keypad), NULL,
+                                     tc3589x->irq_base, NULL);
                if (ret) {
                        dev_err(tc3589x->dev, "failed to add keypad child\n");
                        return ret;
index d20a284ad4baca528c36eb351200ff16fdd5282e..413c891102f867d32b3bd62b25bb02120fe0159e 100644 (file)
@@ -192,7 +192,7 @@ static int __devinit tc6387xb_probe(struct platform_device *dev)
        printk(KERN_INFO "Toshiba tc6387xb initialised\n");
 
        ret = mfd_add_devices(&dev->dev, dev->id, tc6387xb_cells,
-                             ARRAY_SIZE(tc6387xb_cells), iomem, irq);
+                             ARRAY_SIZE(tc6387xb_cells), iomem, irq, NULL);
 
        if (!ret)
                return 0;
index 9612264f0e6dcf7832ebf2f4736815b4eabc6a4b..dcab026fcbb25070a7c79c8e2601b3b911079367 100644 (file)
@@ -700,8 +700,8 @@ static int __devinit tc6393xb_probe(struct platform_device *dev)
        tc6393xb_cells[TC6393XB_CELL_FB].pdata_size = sizeof(*tcpd->fb_data);
 
        ret = mfd_add_devices(&dev->dev, dev->id,
-                       tc6393xb_cells, ARRAY_SIZE(tc6393xb_cells),
-                       iomem, tcpd->irq_base);
+                             tc6393xb_cells, ARRAY_SIZE(tc6393xb_cells),
+                             iomem, tcpd->irq_base, NULL);
 
        if (!ret)
                return 0;
index 4fb0e6c8e8fe0fbfee94299b7c0de45920621a25..7c3675a74f93414c806543f4c50bc44cbb8ad46f 100644 (file)
@@ -412,7 +412,7 @@ static int __devinit ti_ssp_probe(struct platform_device *pdev)
                cells[id].data_size     = data->pdata_size;
        }
 
-       error = mfd_add_devices(dev, 0, cells, 2, NULL, 0);
+       error = mfd_add_devices(dev, 0, cells, 2, NULL, 0, NULL);
        if (error < 0) {
                dev_err(dev, "cannot add mfd cells\n");
                goto error_enable;
index a447f4ec11fb757ee38755aa44da467f83229d1f..cccc626c83c80c2bf79cf03c89936d5249a44714 100644 (file)
@@ -757,25 +757,25 @@ static int __devinit timb_probe(struct pci_dev *dev,
                err = mfd_add_devices(&dev->dev, -1,
                        timberdale_cells_bar0_cfg0,
                        ARRAY_SIZE(timberdale_cells_bar0_cfg0),
-                       &dev->resource[0], msix_entries[0].vector);
+                       &dev->resource[0], msix_entries[0].vector, NULL);
                break;
        case TIMB_HW_VER1:
                err = mfd_add_devices(&dev->dev, -1,
                        timberdale_cells_bar0_cfg1,
                        ARRAY_SIZE(timberdale_cells_bar0_cfg1),
-                       &dev->resource[0], msix_entries[0].vector);
+                       &dev->resource[0], msix_entries[0].vector, NULL);
                break;
        case TIMB_HW_VER2:
                err = mfd_add_devices(&dev->dev, -1,
                        timberdale_cells_bar0_cfg2,
                        ARRAY_SIZE(timberdale_cells_bar0_cfg2),
-                       &dev->resource[0], msix_entries[0].vector);
+                       &dev->resource[0], msix_entries[0].vector, NULL);
                break;
        case TIMB_HW_VER3:
                err = mfd_add_devices(&dev->dev, -1,
                        timberdale_cells_bar0_cfg3,
                        ARRAY_SIZE(timberdale_cells_bar0_cfg3),
-                       &dev->resource[0], msix_entries[0].vector);
+                       &dev->resource[0], msix_entries[0].vector, NULL);
                break;
        default:
                dev_err(&dev->dev, "Unknown IP setup: %d.%d.%d\n",
@@ -792,7 +792,7 @@ static int __devinit timb_probe(struct pci_dev *dev,
 
        err = mfd_add_devices(&dev->dev, 0,
                timberdale_cells_bar1, ARRAY_SIZE(timberdale_cells_bar1),
-               &dev->resource[1], msix_entries[0].vector);
+               &dev->resource[1], msix_entries[0].vector, NULL);
        if (err) {
                dev_err(&dev->dev, "mfd_add_devices failed: %d\n", err);
                goto err_mfd2;
@@ -803,7 +803,7 @@ static int __devinit timb_probe(struct pci_dev *dev,
                ((priv->fw.config & TIMB_HW_VER_MASK) == TIMB_HW_VER3)) {
                err = mfd_add_devices(&dev->dev, 1, timberdale_cells_bar2,
                        ARRAY_SIZE(timberdale_cells_bar2),
-                       &dev->resource[2], msix_entries[0].vector);
+                       &dev->resource[2], msix_entries[0].vector, NULL);
                if (err) {
                        dev_err(&dev->dev, "mfd_add_devices failed: %d\n", err);
                        goto err_mfd2;
index a293b978e27ce19b116025e3ab4e87be6ae0dd74..14051bdc714b80197f258c14afd9cf629667e765 100644 (file)
@@ -188,7 +188,7 @@ static int __devinit tps6105x_probe(struct i2c_client *client,
        }
 
        ret = mfd_add_devices(&client->dev, 0, tps6105x_cells,
-               ARRAY_SIZE(tps6105x_cells), NULL, 0);
+                             ARRAY_SIZE(tps6105x_cells), NULL, 0, NULL);
        if (ret)
                goto fail;
 
index 33ba7723c967435b67c49a6f3f78212a63204b4e..1b203499c74402c59c19bf29a8ce6aa7b51a058b 100644 (file)
@@ -100,7 +100,7 @@ static int tps6507x_i2c_probe(struct i2c_client *i2c,
 
        ret = mfd_add_devices(tps6507x->dev, -1,
                              tps6507x_devs, ARRAY_SIZE(tps6507x_devs),
-                             NULL, 0);
+                             NULL, 0, NULL);
 
        if (ret < 0)
                goto err;
index 80e24f4b47bffce67679b7e637627e9e7c769466..50fd87c87a1cca64e21104401c1adbff1b4cf45c 100644 (file)
@@ -292,7 +292,7 @@ static int __devinit tps65090_i2c_probe(struct i2c_client *client,
        }
 
        ret = mfd_add_devices(tps65090->dev, -1, tps65090s,
-               ARRAY_SIZE(tps65090s), NULL, 0);
+                             ARRAY_SIZE(tps65090s), NULL, 0, NULL);
        if (ret) {
                dev_err(&client->dev, "add mfd devices failed with err: %d\n",
                        ret);
index 61c097a98f5de7eb45fd0ccb8fdc44dd53c869fa..a95e9421b73580df95402f718a4166898a1c41dd 100644 (file)
 #include <linux/slab.h>
 #include <linux/regmap.h>
 #include <linux/err.h>
-#include <linux/regulator/of_regulator.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
 
 #include <linux/mfd/core.h>
 #include <linux/mfd/tps65217.h>
 
+static struct mfd_cell tps65217s[] = {
+       {
+               .name = "tps65217-pmic",
+       },
+};
+
 /**
  * tps65217_reg_read: Read a single tps65217 register.
  *
@@ -133,83 +140,48 @@ int tps65217_clear_bits(struct tps65217 *tps, unsigned int reg,
 }
 EXPORT_SYMBOL_GPL(tps65217_clear_bits);
 
-#ifdef CONFIG_OF
-static struct of_regulator_match reg_matches[] = {
-       { .name = "dcdc1", .driver_data = (void *)TPS65217_DCDC_1 },
-       { .name = "dcdc2", .driver_data = (void *)TPS65217_DCDC_2 },
-       { .name = "dcdc3", .driver_data = (void *)TPS65217_DCDC_3 },
-       { .name = "ldo1", .driver_data = (void *)TPS65217_LDO_1 },
-       { .name = "ldo2", .driver_data = (void *)TPS65217_LDO_2 },
-       { .name = "ldo3", .driver_data = (void *)TPS65217_LDO_3 },
-       { .name = "ldo4", .driver_data = (void *)TPS65217_LDO_4 },
-};
-
-static struct tps65217_board *tps65217_parse_dt(struct i2c_client *client)
-{
-       struct device_node *node = client->dev.of_node;
-       struct tps65217_board *pdata;
-       struct device_node *regs;
-       int count = ARRAY_SIZE(reg_matches);
-       int ret, i;
-
-       regs = of_find_node_by_name(node, "regulators");
-       if (!regs)
-               return NULL;
-
-       ret = of_regulator_match(&client->dev, regs, reg_matches, count);
-       of_node_put(regs);
-       if ((ret < 0) || (ret > count))
-               return NULL;
-
-       count = ret;
-       pdata = devm_kzalloc(&client->dev, count * sizeof(*pdata), GFP_KERNEL);
-       if (!pdata)
-               return NULL;
-
-       for (i = 0; i < count; i++) {
-               if (!reg_matches[i].init_data || !reg_matches[i].of_node)
-                       continue;
-
-               pdata->tps65217_init_data[i] = reg_matches[i].init_data;
-               pdata->of_node[i] = reg_matches[i].of_node;
-       }
-
-       return pdata;
-}
-
-static struct of_device_id tps65217_of_match[] = {
-       { .compatible = "ti,tps65217", },
-       { },
-};
-#else
-static struct tps65217_board *tps65217_parse_dt(struct i2c_client *client)
-{
-       return NULL;
-}
-#endif
-
 static struct regmap_config tps65217_regmap_config = {
        .reg_bits = 8,
        .val_bits = 8,
 };
 
+static const struct of_device_id tps65217_of_match[] = {
+       { .compatible = "ti,tps65217", .data = (void *)TPS65217 },
+       { /* sentinel */ },
+};
+
 static int __devinit tps65217_probe(struct i2c_client *client,
                                const struct i2c_device_id *ids)
 {
        struct tps65217 *tps;
-       struct regulator_init_data *reg_data;
-       struct tps65217_board *pdata = client->dev.platform_data;
-       int i, ret;
        unsigned int version;
+       unsigned int chip_id = ids->driver_data;
+       const struct of_device_id *match;
+       int ret;
 
-       if (!pdata && client->dev.of_node)
-               pdata = tps65217_parse_dt(client);
+       if (client->dev.of_node) {
+               match = of_match_device(tps65217_of_match, &client->dev);
+               if (!match) {
+                       dev_err(&client->dev,
+                               "Failed to find matching dt id\n");
+                       return -EINVAL;
+               }
+               chip_id = (unsigned int)match->data;
+       }
+
+       if (!chip_id) {
+               dev_err(&client->dev, "id is null.\n");
+               return -ENODEV;
+       }
 
        tps = devm_kzalloc(&client->dev, sizeof(*tps), GFP_KERNEL);
        if (!tps)
                return -ENOMEM;
 
-       tps->pdata = pdata;
+       i2c_set_clientdata(client, tps);
+       tps->dev = &client->dev;
+       tps->id = chip_id;
+
        tps->regmap = devm_regmap_init_i2c(client, &tps65217_regmap_config);
        if (IS_ERR(tps->regmap)) {
                ret = PTR_ERR(tps->regmap);
@@ -218,8 +190,12 @@ static int __devinit tps65217_probe(struct i2c_client *client,
                return ret;
        }
 
-       i2c_set_clientdata(client, tps);
-       tps->dev = &client->dev;
+       ret = mfd_add_devices(tps->dev, -1, tps65217s,
+                             ARRAY_SIZE(tps65217s), NULL, 0, NULL);
+       if (ret < 0) {
+               dev_err(tps->dev, "mfd_add_devices failed: %d\n", ret);
+               return ret;
+       }
 
        ret = tps65217_reg_read(tps, TPS65217_REG_CHIPID, &version);
        if (ret < 0) {
@@ -232,41 +208,21 @@ static int __devinit tps65217_probe(struct i2c_client *client,
                        (version & TPS65217_CHIPID_CHIP_MASK) >> 4,
                        version & TPS65217_CHIPID_REV_MASK);
 
-       for (i = 0; i < TPS65217_NUM_REGULATOR; i++) {
-               struct platform_device *pdev;
-
-               pdev = platform_device_alloc("tps65217-pmic", i);
-               if (!pdev) {
-                       dev_err(tps->dev, "Cannot create regulator %d\n", i);
-                       continue;
-               }
-
-               pdev->dev.parent = tps->dev;
-               pdev->dev.of_node = pdata->of_node[i];
-               reg_data = pdata->tps65217_init_data[i];
-               platform_device_add_data(pdev, reg_data, sizeof(*reg_data));
-               tps->regulator_pdev[i] = pdev;
-
-               platform_device_add(pdev);
-       }
-
        return 0;
 }
 
 static int __devexit tps65217_remove(struct i2c_client *client)
 {
        struct tps65217 *tps = i2c_get_clientdata(client);
-       int i;
 
-       for (i = 0; i < TPS65217_NUM_REGULATOR; i++)
-               platform_device_unregister(tps->regulator_pdev[i]);
+       mfd_remove_devices(tps->dev);
 
        return 0;
 }
 
 static const struct i2c_device_id tps65217_id_table[] = {
-       {"tps65217", 0xF0},
-       {/* end of list */}
+       {"tps65217", TPS65217},
+       { /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(i2c, tps65217_id_table);
 
index 353c34812120fc46e37140c0b6a04babdd5cb080..5f58370ccf5502b1d6a729f2daa0db411a6e3e1d 100644 (file)
@@ -493,7 +493,8 @@ static int __devinit tps6586x_i2c_probe(struct i2c_client *client,
        }
 
        ret = mfd_add_devices(tps6586x->dev, -1,
-                       tps6586x_cell, ARRAY_SIZE(tps6586x_cell), NULL, 0);
+                             tps6586x_cell, ARRAY_SIZE(tps6586x_cell),
+                             NULL, 0, NULL);
        if (ret < 0) {
                dev_err(&client->dev, "mfd_add_devices failed: %d\n", ret);
                goto err_mfd_add;
index 1c563792c777ba8f04c194a2a99c39159d8871f7..d3ce4d569deb57c2b707dc73d6cb7cf600962cab 100644 (file)
@@ -254,7 +254,7 @@ static __devinit int tps65910_i2c_probe(struct i2c_client *i2c,
 
        ret = mfd_add_devices(tps65910->dev, -1,
                              tps65910s, ARRAY_SIZE(tps65910s),
-                             NULL, 0);
+                             NULL, 0, NULL);
        if (ret < 0) {
                dev_err(&i2c->dev, "mfd_add_devices failed: %d\n", ret);
                return ret;
index 74fd8cb5f37224e576f58398c20fda9f4884d9e7..4658b5bdcd84488d3379a2ef4f33e334eb96ea2f 100644 (file)
@@ -146,7 +146,7 @@ int tps65912_device_init(struct tps65912 *tps65912)
 
        ret = mfd_add_devices(tps65912->dev, -1,
                              tps65912s, ARRAY_SIZE(tps65912s),
-                             NULL, 0);
+                             NULL, 0, NULL);
        if (ret < 0)
                goto err;
 
index 838ce4eb444e24ce44bd3b120fe21a9f977d6e75..77c9acb145831c5b03fd04cbc08d0f6e0bfd47ec 100644 (file)
@@ -223,7 +223,7 @@ static int __devinit twl4030_audio_probe(struct platform_device *pdev)
 
        if (childs)
                ret = mfd_add_devices(&pdev->dev, pdev->id, audio->cells,
-                                     childs, NULL, 0);
+                                     childs, NULL, 0, NULL);
        else {
                dev_err(&pdev->dev, "No platform data found for childs\n");
                ret = -ENODEV;
index b0fad0ffca560b0714a9574b42d4c87c89ea3e4d..3dca5c195a200505c3796c488b34b65f8a32b7e6 100644 (file)
@@ -632,7 +632,7 @@ static int __devinit twl6040_probe(struct i2c_client *client,
        }
 
        ret = mfd_add_devices(&client->dev, -1, twl6040->cells, children,
-                             NULL, 0);
+                             NULL, 0, NULL);
        if (ret)
                goto mfd_err;
 
index 872aff21e4be6fa682eb01aad72b04492efd622e..b9a636d44c7f95979adcd17172e9d7803fa5d5fe 100644 (file)
@@ -102,7 +102,7 @@ static __devinit int vx855_probe(struct pci_dev *pdev,
        vx855_gpio_resources[1].end = vx855_gpio_resources[1].start + 3;
 
        ret = mfd_add_devices(&pdev->dev, -1, vx855_cells, ARRAY_SIZE(vx855_cells),
-                       NULL, 0);
+                       NULL, 0, NULL);
 
        /* we always return -ENODEV here in order to enable other
         * drivers like old, not-yet-platform_device ported i2c-viapro */
index f39b756df561dcfd5fee7fa310222bb27748d708..86e0e4309fc274e41ec73a20dc1fb675e06fb0fc 100644 (file)
@@ -241,7 +241,7 @@ static int __devinit wl1273_core_probe(struct i2c_client *client,
                __func__, children);
 
        r = mfd_add_devices(&client->dev, -1, core->cells,
-                           children, NULL, 0);
+                           children, NULL, 0, NULL);
        if (r)
                goto err;
 
index 946698fd2dc6a4dc9b3a7868bc7f44c920a46201..3017310359403248719495fe39692698271ba7ed 100644 (file)
@@ -1813,27 +1813,27 @@ int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
        case WM8310:
                ret = mfd_add_devices(wm831x->dev, wm831x_num,
                                      wm8310_devs, ARRAY_SIZE(wm8310_devs),
-                                     NULL, 0);
+                                     NULL, 0, NULL);
                break;
 
        case WM8311:
                ret = mfd_add_devices(wm831x->dev, wm831x_num,
                                      wm8311_devs, ARRAY_SIZE(wm8311_devs),
-                                     NULL, 0);
+                                     NULL, 0, NULL);
                if (!pdata || !pdata->disable_touch)
                        mfd_add_devices(wm831x->dev, wm831x_num,
                                        touch_devs, ARRAY_SIZE(touch_devs),
-                                       NULL, 0);
+                                       NULL, 0, NULL);
                break;
 
        case WM8312:
                ret = mfd_add_devices(wm831x->dev, wm831x_num,
                                      wm8312_devs, ARRAY_SIZE(wm8312_devs),
-                                     NULL, 0);
+                                     NULL, 0, NULL);
                if (!pdata || !pdata->disable_touch)
                        mfd_add_devices(wm831x->dev, wm831x_num,
                                        touch_devs, ARRAY_SIZE(touch_devs),
-                                       NULL, 0);
+                                       NULL, 0, NULL);
                break;
 
        case WM8320:
@@ -1842,7 +1842,7 @@ int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
        case WM8326:
                ret = mfd_add_devices(wm831x->dev, wm831x_num,
                                      wm8320_devs, ARRAY_SIZE(wm8320_devs),
-                                     NULL, 0);
+                                     NULL, 0, NULL);
                break;
 
        default:
@@ -1867,7 +1867,7 @@ int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
        if (ret & WM831X_XTAL_ENA) {
                ret = mfd_add_devices(wm831x->dev, wm831x_num,
                                      rtc_devs, ARRAY_SIZE(rtc_devs),
-                                     NULL, 0);
+                                     NULL, 0, NULL);
                if (ret != 0) {
                        dev_err(wm831x->dev, "Failed to add RTC: %d\n", ret);
                        goto err_irq;
@@ -1880,7 +1880,7 @@ int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
                /* Treat errors as non-critical */
                ret = mfd_add_devices(wm831x->dev, wm831x_num, backlight_devs,
                                      ARRAY_SIZE(backlight_devs), NULL,
-                                     0);
+                                     0, NULL);
                if (ret < 0)
                        dev_err(wm831x->dev, "Failed to add backlight: %d\n",
                                ret);
index 4b7d378551d58daf515532dbcaea2c6f35c17f1e..639ca359242f849cebfe407333e6107c0bca4bbb 100644 (file)
@@ -70,7 +70,7 @@ static int wm8400_register_codec(struct wm8400 *wm8400)
                .pdata_size = sizeof(*wm8400),
        };
 
-       return mfd_add_devices(wm8400->dev, -1, &cell, 1, NULL, 0);
+       return mfd_add_devices(wm8400->dev, -1, &cell, 1, NULL, 0, NULL);
 }
 
 /*
index eec74aa55fdfe28c46476c3360e1632bfd51cf7f..2febf88cfce8847383be5a4fe8ab80ae89fe48e2 100644 (file)
@@ -414,7 +414,7 @@ static __devinit int wm8994_device_init(struct wm8994 *wm8994, int irq)
        ret = mfd_add_devices(wm8994->dev, -1,
                              wm8994_regulator_devs,
                              ARRAY_SIZE(wm8994_regulator_devs),
-                             NULL, 0);
+                             NULL, 0, NULL);
        if (ret != 0) {
                dev_err(wm8994->dev, "Failed to add children: %d\n", ret);
                goto err;
@@ -648,7 +648,7 @@ static __devinit int wm8994_device_init(struct wm8994 *wm8994, int irq)
 
        ret = mfd_add_devices(wm8994->dev, -1,
                              wm8994_devs, ARRAY_SIZE(wm8994_devs),
-                             NULL, 0);
+                             NULL, 0, NULL);
        if (ret != 0) {
                dev_err(wm8994->dev, "Failed to add children: %d\n", ret);
                goto err_irq;
index f1c84decb192638e02aa13488e58c855d7335704..172a768036d87d700c018d36cdfdf9dc7093ae4d 100644 (file)
@@ -1411,7 +1411,8 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
                /* complete ongoing async transfer before issuing discard */
                if (card->host->areq)
                        mmc_blk_issue_rw_rq(mq, NULL);
-               if (req->cmd_flags & REQ_SECURE)
+               if (req->cmd_flags & REQ_SECURE &&
+                       !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
                        ret = mmc_blk_issue_secdiscard_rq(mq, req);
                else
                        ret = mmc_blk_issue_discard_rq(mq, req);
@@ -1716,6 +1717,7 @@ force_ro_fail:
 #define CID_MANFID_SANDISK     0x2
 #define CID_MANFID_TOSHIBA     0x11
 #define CID_MANFID_MICRON      0x13
+#define CID_MANFID_SAMSUNG     0x15
 
 static const struct mmc_fixup blk_fixups[] =
 {
@@ -1752,6 +1754,28 @@ static const struct mmc_fixup blk_fixups[] =
        MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
                  MMC_QUIRK_LONG_READ_TIME),
 
+       /*
+        * On these Samsung MoviNAND parts, performing secure erase or
+        * secure trim can result in unrecoverable corruption due to a
+        * firmware bug.
+        */
+       MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+                 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+       MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+                 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+       MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+                 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+       MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+                 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+       MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+                 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+       MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+                 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+       MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+                 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+       MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+                 MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+
        END_FIXUP
 };
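
These Samsung eMMC entries work through the existing fixup machinery: when the block driver binds to the card the table is scanned, add_quirk_mmc() sets MMC_QUIRK_SEC_ERASE_TRIM_BROKEN (introduced by this series) in card->quirks, and the REQ_SECURE branch earlier in this file then falls back to an ordinary discard for the affected parts. A hypothetical helper mirroring that check, purely for illustration (secure_ops_allowed is not a kernel API):

#include <linux/mmc/card.h>

static bool secure_ops_allowed(const struct mmc_card *card)
{
	/* Same gate as the hunk in mmc_blk_issue_rq() above. */
	return !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN);
}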
 
index 322412cec4eeb8ca6970d2d12f37c7c83bbba42e..a53c7c478e054c8c47df5a5a219a2b1ed1e6836b 100644 (file)
@@ -81,6 +81,7 @@ struct atmel_mci_caps {
        bool    has_bad_data_ordering;
        bool    need_reset_after_xfer;
        bool    need_blksz_mul_4;
+       bool    need_notbusy_for_read_ops;
 };
 
 struct atmel_mci_dma {
@@ -1625,7 +1626,8 @@ static void atmci_tasklet_func(unsigned long priv)
                                __func__);
                        atmci_set_completed(host, EVENT_XFER_COMPLETE);
 
-                       if (host->data->flags & MMC_DATA_WRITE) {
+                       if (host->caps.need_notbusy_for_read_ops ||
+                          (host->data->flags & MMC_DATA_WRITE)) {
                                atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
                                state = STATE_WAITING_NOTBUSY;
                        } else if (host->mrq->stop) {
@@ -2218,6 +2220,7 @@ static void __init atmci_get_cap(struct atmel_mci *host)
        host->caps.has_bad_data_ordering = 1;
        host->caps.need_reset_after_xfer = 1;
        host->caps.need_blksz_mul_4 = 1;
+       host->caps.need_notbusy_for_read_ops = 0;
 
        /* keep only major version number */
        switch (version & 0xf00) {
@@ -2238,6 +2241,7 @@ static void __init atmci_get_cap(struct atmel_mci *host)
        case 0x200:
                host->caps.has_rwproof = 1;
                host->caps.need_blksz_mul_4 = 0;
+               host->caps.need_notbusy_for_read_ops = 1;
        case 0x100:
                host->caps.has_bad_data_ordering = 0;
                host->caps.need_reset_after_xfer = 0;
index 03666174ca483e61a351626ad428a9bc833ef981..a17dd7363cebedc69c41d28b47b6c4fd895a327a 100644 (file)
 #define bfin_write_SDH_CFG             bfin_write_RSI_CFG
 #endif
 
-struct dma_desc_array {
-       unsigned long   start_addr;
-       unsigned short  cfg;
-       unsigned short  x_count;
-       short           x_modify;
-} __packed;
-
 struct sdh_host {
        struct mmc_host         *mmc;
        spinlock_t              lock;
index 72dc3cde646d063513f3a55fc8ba0ec74197261b..af40d227bece22ecb51bb0862df913aadc3ce6da 100644 (file)
@@ -627,6 +627,7 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot)
 {
        struct dw_mci *host = slot->host;
        u32 div;
+       u32 clk_en_a;
 
        if (slot->clock != host->current_speed) {
                div = host->bus_hz / slot->clock;
@@ -659,9 +660,11 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot)
                mci_send_cmd(slot,
                             SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
 
-               /* enable clock */
-               mci_writel(host, CLKENA, ((SDMMC_CLKEN_ENABLE |
-                          SDMMC_CLKEN_LOW_PWR) << slot->id));
+               /* enable clock; only low power if no SDIO */
+               clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
+               if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->id)))
+                       clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
+               mci_writel(host, CLKENA, clk_en_a);
 
                /* inform CIU */
                mci_send_cmd(slot,
@@ -862,6 +865,30 @@ static int dw_mci_get_cd(struct mmc_host *mmc)
        return present;
 }
 
+/*
+ * Disable low power mode.
+ *
+ * Low power mode will stop the card clock when idle.  According to the
+ * description of the CLKENA register we should disable low power mode
+ * for SDIO cards if we need SDIO interrupts to work.
+ *
+ * This function is fast if low power mode is already disabled.
+ */
+static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
+{
+       struct dw_mci *host = slot->host;
+       u32 clk_en_a;
+       const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
+
+       clk_en_a = mci_readl(host, CLKENA);
+
+       if (clk_en_a & clken_low_pwr) {
+               mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
+               mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
+                            SDMMC_CMD_PRV_DAT_WAIT, 0);
+       }
+}
+
 static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
 {
        struct dw_mci_slot *slot = mmc_priv(mmc);
@@ -871,6 +898,14 @@ static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
        /* Enable/disable Slot Specific SDIO interrupt */
        int_mask = mci_readl(host, INTMASK);
        if (enb) {
+               /*
+                * Turn off low power mode if it was enabled.  This is a bit of
+                * a heavy operation and we disable / enable IRQs a lot, so
+                * we'll leave low power mode disabled and it will get
+                * re-enabled again in dw_mci_setup_bus().
+                */
+               dw_mci_disable_low_power(slot);
+
                mci_writel(host, INTMASK,
                           (int_mask | SDMMC_INT_SDIO(slot->id)));
        } else {
@@ -1429,22 +1464,10 @@ static void dw_mci_read_data_pio(struct dw_mci *host)
                        nbytes += len;
                        remain -= len;
                } while (remain);
-               sg_miter->consumed = offset;
 
+               sg_miter->consumed = offset;
                status = mci_readl(host, MINTSTS);
                mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
-               if (status & DW_MCI_DATA_ERROR_FLAGS) {
-                       host->data_status = status;
-                       data->bytes_xfered += nbytes;
-                       sg_miter_stop(sg_miter);
-                       host->sg = NULL;
-                       smp_wmb();
-
-                       set_bit(EVENT_DATA_ERROR, &host->pending_events);
-
-                       tasklet_schedule(&host->tasklet);
-                       return;
-               }
        } while (status & SDMMC_INT_RXDR); /*if the RXDR is ready read again*/
        data->bytes_xfered += nbytes;
 
@@ -1497,23 +1520,10 @@ static void dw_mci_write_data_pio(struct dw_mci *host)
                        nbytes += len;
                        remain -= len;
                } while (remain);
-               sg_miter->consumed = offset;
 
+               sg_miter->consumed = offset;
                status = mci_readl(host, MINTSTS);
                mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
-               if (status & DW_MCI_DATA_ERROR_FLAGS) {
-                       host->data_status = status;
-                       data->bytes_xfered += nbytes;
-                       sg_miter_stop(sg_miter);
-                       host->sg = NULL;
-
-                       smp_wmb();
-
-                       set_bit(EVENT_DATA_ERROR, &host->pending_events);
-
-                       tasklet_schedule(&host->tasklet);
-                       return;
-               }
        } while (status & SDMMC_INT_TXDR); /* if TXDR write again */
        data->bytes_xfered += nbytes;
 
@@ -1547,12 +1557,11 @@ static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
 static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
 {
        struct dw_mci *host = dev_id;
-       u32 status, pending;
+       u32 pending;
        unsigned int pass_count = 0;
        int i;
 
        do {
-               status = mci_readl(host, RINTSTS);
                pending = mci_readl(host, MINTSTS); /* read-only mask reg */
 
                /*
@@ -1570,7 +1579,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
 
                if (pending & DW_MCI_CMD_ERROR_FLAGS) {
                        mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
-                       host->cmd_status = status;
+                       host->cmd_status = pending;
                        smp_wmb();
                        set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
                }
@@ -1578,18 +1587,16 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
                if (pending & DW_MCI_DATA_ERROR_FLAGS) {
                        /* if there is an error report DATA_ERROR */
                        mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
-                       host->data_status = status;
+                       host->data_status = pending;
                        smp_wmb();
                        set_bit(EVENT_DATA_ERROR, &host->pending_events);
-                       if (!(pending & (SDMMC_INT_DTO | SDMMC_INT_DCRC |
-                                        SDMMC_INT_SBE | SDMMC_INT_EBE)))
-                               tasklet_schedule(&host->tasklet);
+                       tasklet_schedule(&host->tasklet);
                }
 
                if (pending & SDMMC_INT_DATA_OVER) {
                        mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
                        if (!host->data_status)
-                               host->data_status = status;
+                               host->data_status = pending;
                        smp_wmb();
                        if (host->dir_status == DW_MCI_RECV_STATUS) {
                                if (host->sg != NULL)
@@ -1613,7 +1620,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
 
                if (pending & SDMMC_INT_CMD_DONE) {
                        mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
-                       dw_mci_cmd_interrupt(host, status);
+                       dw_mci_cmd_interrupt(host, pending);
                }
 
                if (pending & SDMMC_INT_CD) {
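
Two things change in this file. First, the interrupt handler now acts on MINTSTS ("pending", the status after masking) rather than RINTSTS (the raw status), so a source the driver has masked out in INTMASK can no longer leak into cmd_status/data_status. Second, low-power clock gating is kept off for a slot while its SDIO interrupt is enabled, because a card whose clock is stopped cannot raise an interrupt; dw_mci_setup_bus() re-evaluates that choice on the next clock update. A small sketch of the register relationship assumed here, reusing the driver's own mci_readl() accessor and register names (the helper itself is illustrative only):

static u32 dw_mci_pending_sources(struct dw_mci *host)
{
	u32 raw  = mci_readl(host, RINTSTS);	/* every asserted source   */
	u32 mask = mci_readl(host, INTMASK);	/* sources we have enabled */

	/* MINTSTS is the controller's precomputed (raw & mask). */
	return raw & mask;
}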
index a51f9309ffbb1e49947939fb60d6c6dcc8e3be93..ad3fcea1269ebc179105563333b01cfdf139a9ed 100644 (file)
@@ -285,11 +285,11 @@ static irqreturn_t mxs_mmc_irq_handler(int irq, void *dev_id)
        writel(stat & MXS_MMC_IRQ_BITS,
               host->base + HW_SSP_CTRL1(host) + STMP_OFFSET_REG_CLR);
 
+       spin_unlock(&host->lock);
+
        if ((stat & BM_SSP_CTRL1_SDIO_IRQ) && (stat & BM_SSP_CTRL1_SDIO_IRQ_EN))
                mmc_signal_sdio_irq(host->mmc);
 
-       spin_unlock(&host->lock);
-
        if (stat & BM_SSP_CTRL1_RESP_TIMEOUT_IRQ)
                cmd->error = -ETIMEDOUT;
        else if (stat & BM_SSP_CTRL1_RESP_ERR_IRQ)
@@ -644,11 +644,6 @@ static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
                       host->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
                writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
                       host->base + HW_SSP_CTRL1(host) + STMP_OFFSET_REG_SET);
-
-               if (readl(host->base + HW_SSP_STATUS(host)) &
-                               BM_SSP_STATUS_SDIO_IRQ)
-                       mmc_signal_sdio_irq(host->mmc);
-
        } else {
                writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
                       host->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
@@ -657,6 +652,11 @@ static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
        }
 
        spin_unlock_irqrestore(&host->lock, flags);
+
+       if (enable && readl(host->base + HW_SSP_STATUS(host)) &
+                       BM_SSP_STATUS_SDIO_IRQ)
+               mmc_signal_sdio_irq(host->mmc);
+
 }
 
 static const struct mmc_host_ops mxs_mmc_ops = {
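
Both hunks here move mmc_signal_sdio_irq() out from under host->lock. In this kernel mmc_signal_sdio_irq() immediately calls back into host->ops->enable_sdio_irq(), and mxs_mmc_enable_sdio_irq() takes the same spinlock, so signalling with the lock held would recursively take the lock and deadlock. A minimal sketch of the safe ordering, using the driver's fields shown above (the helper name is made up):

static void mxs_mmc_maybe_signal_sdio(struct mxs_mmc_host *host, u32 stat)
{
	bool signal;

	spin_lock(&host->lock);
	/* Decide while the state is stable... */
	signal = (stat & BM_SSP_CTRL1_SDIO_IRQ) &&
		 (stat & BM_SSP_CTRL1_SDIO_IRQ_EN);
	spin_unlock(&host->lock);

	/* ...but call back into the MMC core only after dropping the lock. */
	if (signal)
		mmc_signal_sdio_irq(host->mmc);
}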
index 50e08f03aa65ce72ce3876deb304db2fbed8a22f..a5999a74496af218c540959ee757b7c1ad3f1365 100644 (file)
@@ -668,7 +668,7 @@ mmc_omap_clk_timer(unsigned long data)
 static void
 mmc_omap_xfer_data(struct mmc_omap_host *host, int write)
 {
-       int n;
+       int n, nwords;
 
        if (host->buffer_bytes_left == 0) {
                host->sg_idx++;
@@ -678,15 +678,23 @@ mmc_omap_xfer_data(struct mmc_omap_host *host, int write)
        n = 64;
        if (n > host->buffer_bytes_left)
                n = host->buffer_bytes_left;
+
+       nwords = n / 2;
+       nwords += n & 1; /* handle odd number of bytes to transfer */
+
        host->buffer_bytes_left -= n;
        host->total_bytes_left -= n;
        host->data->bytes_xfered += n;
 
        if (write) {
-               __raw_writesw(host->virt_base + OMAP_MMC_REG(host, DATA), host->buffer, n);
+               __raw_writesw(host->virt_base + OMAP_MMC_REG(host, DATA),
+                             host->buffer, nwords);
        } else {
-               __raw_readsw(host->virt_base + OMAP_MMC_REG(host, DATA), host->buffer, n);
+               __raw_readsw(host->virt_base + OMAP_MMC_REG(host, DATA),
+                            host->buffer, nwords);
        }
+
+       host->buffer += nwords;
 }
 
 static inline void mmc_omap_report_irq(u16 status)
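
The data FIFO here is accessed 16 bits at a time, so __raw_writesw()/__raw_readsw() take a count of 16-bit words; the old code passed the byte count (transferring twice the intended amount) and never advanced host->buffer between chunks. The fix rounds an odd byte count up to a whole word and steps the buffer pointer (a u16 *) by that many words. A standalone illustration of the rounding:

#include <stdio.h>

/* Round a byte count up to 16-bit FIFO words, as the hunk above does. */
static unsigned int bytes_to_words(unsigned int n)
{
	return n / 2 + (n & 1);
}

int main(void)
{
	/* A full 64-byte chunk is 32 words; a trailing 3-byte chunk is 2. */
	printf("%u words, %u words\n", bytes_to_words(64), bytes_to_words(3));
	return 0;
}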
index b97b2f5dafdb4d15160701dd12060b05c394b464..d25f9ab9a54da919a6e54cc05743f74ad83362f1 100644 (file)
@@ -48,14 +48,14 @@ static inline void esdhc_set_clock(struct sdhci_host *host, unsigned int clock)
        int div = 1;
        u32 temp;
 
+       if (clock == 0)
+               goto out;
+
        temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
        temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
                | ESDHC_CLOCK_MASK);
        sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
 
-       if (clock == 0)
-               goto out;
-
        while (host->max_clk / pre_div / 16 > clock && pre_div < 256)
                pre_div *= 2;
 
index 437bc193e170d6da377d6b7447adc5cc43d348bb..568307cc7caf8d882034011aa5303ad47eaf3bed 100644 (file)
@@ -340,7 +340,7 @@ retry:
         * of this LEB as it will be deleted and freed in 'ubi_add_to_av()'.
         */
        err = ubi_add_to_av(ubi, ai, new_aeb->pnum, new_aeb->ec, vid_hdr, 0);
-       kfree(new_aeb);
+       kmem_cache_free(ai->aeb_slab_cache, new_aeb);
        ubi_free_vid_hdr(ubi, vid_hdr);
        return err;
 
@@ -353,7 +353,7 @@ write_error:
                list_add(&new_aeb->u.list, &ai->erase);
                goto retry;
        }
-       kfree(new_aeb);
+       kmem_cache_free(ai->aeb_slab_cache, new_aeb);
 out_free:
        ubi_free_vid_hdr(ubi, vid_hdr);
        return err;
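
new_aeb is allocated from ai->aeb_slab_cache, so it has to be returned with kmem_cache_free(): kfree() is the counterpart of kmalloc(), not of a private slab cache, and objects obtained with kmem_cache_alloc() must go back to the same cache. A self-contained sketch of the pairing rule, with placeholder names (my_cache, struct my_obj, my_cache_demo):

#include <linux/errno.h>
#include <linux/slab.h>

struct my_obj { int x; };

static int my_cache_demo(void)
{
	struct kmem_cache *my_cache;
	struct my_obj *obj;

	my_cache = kmem_cache_create("my_cache", sizeof(struct my_obj),
				     0, 0, NULL);
	if (!my_cache)
		return -ENOMEM;

	obj = kmem_cache_alloc(my_cache, GFP_KERNEL);
	if (obj)
		kmem_cache_free(my_cache, obj);	/* not kfree(obj) */

	kmem_cache_destroy(my_cache);
	return 0;
}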
index a580db29e50360f8be44403e5893764261562549..26e7129332abc7e18bd7b27cd3f49d67976c3a16 100644 (file)
 #define INSTRUCTION_LOAD_TXB(n)        (0x40 + 2 * (n))
 #define INSTRUCTION_READ_RXB(n)        (((n) == 0) ? 0x90 : 0x94)
 #define INSTRUCTION_RESET      0xC0
+#define RTS_TXB0               0x01
+#define RTS_TXB1               0x02
+#define RTS_TXB2               0x04
+#define INSTRUCTION_RTS(n)     (0x80 | ((n) & 0x07))
+
 
 /* MPC251x registers */
 #define CANSTAT              0x0e
@@ -397,6 +402,7 @@ static void mcp251x_hw_tx_frame(struct spi_device *spi, u8 *buf,
 static void mcp251x_hw_tx(struct spi_device *spi, struct can_frame *frame,
                          int tx_buf_idx)
 {
+       struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
        u32 sid, eid, exide, rtr;
        u8 buf[SPI_TRANSFER_BUF_LEN];
 
@@ -418,7 +424,10 @@ static void mcp251x_hw_tx(struct spi_device *spi, struct can_frame *frame,
        buf[TXBDLC_OFF] = (rtr << DLC_RTR_SHIFT) | frame->can_dlc;
        memcpy(buf + TXBDAT_OFF, frame->data, frame->can_dlc);
        mcp251x_hw_tx_frame(spi, buf, frame->can_dlc, tx_buf_idx);
-       mcp251x_write_reg(spi, TXBCTRL(tx_buf_idx), TXBCTRL_TXREQ);
+
+       /* use INSTRUCTION_RTS to avoid the "repeated frame problem" */
+       priv->spi_tx_buf[0] = INSTRUCTION_RTS(1 << tx_buf_idx);
+       mcp251x_spi_trans(priv->spi, 1);
 }
 
 static void mcp251x_hw_rx_frame(struct spi_device *spi, u8 *buf,
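
Transmission is now kicked with the controller's dedicated single-byte RTS SPI instruction instead of writing TXREQ into TXBnCTRL, which the comment above associates with the "repeated frame problem". With INSTRUCTION_RTS(n) defined above as 0x80 | (n & 0x07), the three transmit buffers map to fixed command bytes; a compile-time sanity sketch (the function itself is illustrative only):

#include <linux/bug.h>
#include <linux/compiler.h>

static void __maybe_unused mcp251x_rts_sanity_check(void)
{
	/* INSTRUCTION_RTS(1 << tx_buf_idx) for the three TX buffers: */
	BUILD_BUG_ON(INSTRUCTION_RTS(1 << 0) != 0x81);	/* selects TXB0 (RTS_TXB0) */
	BUILD_BUG_ON(INSTRUCTION_RTS(1 << 1) != 0x82);	/* selects TXB1 (RTS_TXB1) */
	BUILD_BUG_ON(INSTRUCTION_RTS(1 << 2) != 0x84);	/* selects TXB2 (RTS_TXB2) */
}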
index 4f50145f64839f519c232d0e4ec79e7fedf2bd34..662c5f7eb0c54af4cb3c788959109a7693798629 100644 (file)
@@ -109,7 +109,9 @@ static int sp_probe(struct platform_device *pdev)
        priv = netdev_priv(dev);
 
        dev->irq = res_irq->start;
-       priv->irq_flags = res_irq->flags & (IRQF_TRIGGER_MASK | IRQF_SHARED);
+       priv->irq_flags = res_irq->flags & IRQF_TRIGGER_MASK;
+       if (res_irq->flags & IORESOURCE_IRQ_SHAREABLE)
+               priv->irq_flags |= IRQF_SHARED;
        priv->reg_base = addr;
        /* The CAN clock frequency is half the oscillator clock frequency */
        priv->can.clock.freq = pdata->osc_freq / 2;
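
IORESOURCE_* and IRQF_* flags live in different namespaces. The retained "res_irq->flags & IRQF_TRIGGER_MASK" relies on the trigger bits lining up between the two, but IORESOURCE_IRQ_SHAREABLE and IRQF_SHARED are different bit values, so masking the resource flags with IRQF_SHARED silently dropped the shareable information; it now gets translated explicitly. The same translation as a small helper (the function name is made up, not kernel API):

#include <linux/interrupt.h>
#include <linux/ioport.h>

static unsigned long irq_flags_from_resource(const struct resource *res)
{
	/* Trigger bits pass through; the shareable bit is mapped by hand. */
	unsigned long flags = res->flags & IRQF_TRIGGER_MASK;

	if (res->flags & IORESOURCE_IRQ_SHAREABLE)
		flags |= IRQF_SHARED;
	return flags;
}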
index 3105961756767cb3b52bc18bce8a248e7ef110b7..b595d3422b9f759d86c760370b30efcb04f76575 100644 (file)
@@ -150,7 +150,7 @@ int softing_load_fw(const char *file, struct softing *card,
        const uint8_t *mem, *end, *dat;
        uint16_t type, len;
        uint32_t addr;
-       uint8_t *buf = NULL;
+       uint8_t *buf = NULL, *new_buf;
        int buflen = 0;
        int8_t type_end = 0;
 
@@ -199,11 +199,12 @@ int softing_load_fw(const char *file, struct softing *card,
                if (len > buflen) {
                        /* align buflen */
                        buflen = (len + (1024-1)) & ~(1024-1);
-                       buf = krealloc(buf, buflen, GFP_KERNEL);
-                       if (!buf) {
+                       new_buf = krealloc(buf, buflen, GFP_KERNEL);
+                       if (!new_buf) {
                                ret = -ENOMEM;
                                goto failed;
                        }
+                       buf = new_buf;
                }
                /* verify record data */
                memcpy_fromio(buf, &dpram[addr + offset], len);
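
Assigning krealloc()'s return value straight back to buf leaks the original allocation on failure: krealloc() returns NULL and leaves the old block intact, but the only pointer to it has just been overwritten. Staging the result in new_buf keeps buf valid for the error path to free. The same pattern as a reusable sketch (grow_buffer() is a made-up helper, not part of this driver):

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>

static int grow_buffer(u8 **bufp, size_t *buflenp, size_t want)
{
	size_t newlen;
	u8 *tmp;

	if (want <= *buflenp)
		return 0;

	newlen = ALIGN(want, 1024);		/* same 1 KiB rounding as above */
	tmp = krealloc(*bufp, newlen, GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;			/* *bufp still valid; caller frees it */

	*bufp = tmp;
	*buflenp = newlen;
	return 0;
}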
index 463b9ec57d8077c8a1599457974ab15dc1210536..6d1a24acb77e1cb9332906759811980aa537b4e2 100644 (file)
@@ -1708,9 +1708,6 @@ struct bnx2x_func_init_params {
                        continue;               \
                else
 
-#define for_each_napi_rx_queue(bp, var) \
-       for ((var) = 0; (var) < bp->num_napi_queues; (var)++)
-
 /* Skip OOO FP */
 #define for_each_tx_queue(bp, var) \
        for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
index e879e19eb0d68926a883ee55ffcaaa194a8b513b..af20c6ee2cd9292dfa41c8693e6d8253dfe667e4 100644 (file)
@@ -2046,6 +2046,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
         */
        bnx2x_setup_tc(bp->dev, bp->max_cos);
 
+       /* Add all NAPI objects */
+       bnx2x_add_all_napi(bp);
        bnx2x_napi_enable(bp);
 
        /* set pf load just before approaching the MCP */
@@ -2408,6 +2410,8 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 
                /* Disable HW interrupts, NAPI */
                bnx2x_netif_stop(bp, 1);
+               /* Delete all NAPI objects */
+               bnx2x_del_all_napi(bp);
 
                /* Release IRQs */
                bnx2x_free_irq(bp);
index dfa757e742966998fb8e9ea8b99f2667b0835a43..dfd86a55f1dcab583ad08342a21eaae9a97be53f 100644 (file)
@@ -710,17 +710,15 @@ static inline u16 bnx2x_tx_avail(struct bnx2x *bp,
        prod = txdata->tx_bd_prod;
        cons = txdata->tx_bd_cons;
 
-       /* NUM_TX_RINGS = number of "next-page" entries
-          It will be used as a threshold */
-       used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
+       used = SUB_S16(prod, cons);
 
 #ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
-       WARN_ON(used > bp->tx_ring_size);
-       WARN_ON((bp->tx_ring_size - used) > MAX_TX_AVAIL);
+       WARN_ON(used > txdata->tx_ring_size);
+       WARN_ON((txdata->tx_ring_size - used) > MAX_TX_AVAIL);
 #endif
 
-       return (s16)(bp->tx_ring_size) - used;
+       return (s16)(txdata->tx_ring_size) - used;
 }
 
 static inline int bnx2x_tx_queue_has_work(struct bnx2x_fp_txdata *txdata)
@@ -792,7 +790,7 @@ static inline void bnx2x_add_all_napi(struct bnx2x *bp)
        bp->num_napi_queues = bp->num_queues;
 
        /* Add NAPI objects */
-       for_each_napi_rx_queue(bp, i)
+       for_each_rx_queue(bp, i)
                netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
                               bnx2x_poll, BNX2X_NAPI_WEIGHT);
 }
@@ -801,7 +799,7 @@ static inline void bnx2x_del_all_napi(struct bnx2x *bp)
 {
        int i;
 
-       for_each_napi_rx_queue(bp, i)
+       for_each_rx_queue(bp, i)
                netif_napi_del(&bnx2x_fp(bp, i, napi));
 }
 
@@ -1088,6 +1086,7 @@ static inline void bnx2x_init_txdata(struct bnx2x *bp,
        txdata->txq_index = txq_index;
        txdata->tx_cons_sb = tx_cons_sb;
        txdata->parent_fp = fp;
+       txdata->tx_ring_size = IS_FCOE_FP(fp) ? MAX_TX_AVAIL : bp->tx_ring_size;
 
        DP(NETIF_MSG_IFUP, "created tx data cid %d, txq %d\n",
           txdata->cid, txdata->txq_index);
index 3e4cff9b1ebee60a4fa4cedbb90fdeaecc73b580..b926f58e983bbfe083e004c362df6203efb17a3e 100644 (file)
@@ -401,11 +401,11 @@ static const struct reg_addr reg_addrs[] = {
        { 0x70000, 8, RI_ALL_ONLINE },
        { 0x70020, 8184, RI_ALL_OFFLINE },
        { 0x78000, 8192, RI_E3E3B0_OFFLINE },
-       { 0x85000, 3, RI_ALL_ONLINE },
-       { 0x8501c, 7, RI_ALL_ONLINE },
-       { 0x85048, 1, RI_ALL_ONLINE },
-       { 0x85200, 32, RI_ALL_ONLINE },
-       { 0xb0000, 16384, RI_E1H_ONLINE },
+       { 0x85000, 3, RI_ALL_OFFLINE },
+       { 0x8501c, 7, RI_ALL_OFFLINE },
+       { 0x85048, 1, RI_ALL_OFFLINE },
+       { 0x85200, 32, RI_ALL_OFFLINE },
+       { 0xb0000, 16384, RI_E1H_OFFLINE },
        { 0xc1000, 7, RI_ALL_ONLINE },
        { 0xc103c, 2, RI_E2E3E3B0_ONLINE },
        { 0xc1800, 2, RI_ALL_ONLINE },
@@ -581,17 +581,12 @@ static const struct reg_addr reg_addrs[] = {
        { 0x140188, 3, RI_E1E1HE2E3_ONLINE },
        { 0x140194, 13, RI_ALL_ONLINE },
        { 0x140200, 6, RI_E1E1HE2E3_ONLINE },
-       { 0x140220, 4, RI_E2E3_ONLINE },
-       { 0x140240, 4, RI_E2E3_ONLINE },
        { 0x140260, 4, RI_E2E3_ONLINE },
        { 0x140280, 4, RI_E2E3_ONLINE },
-       { 0x1402a0, 4, RI_E2E3_ONLINE },
-       { 0x1402c0, 4, RI_E2E3_ONLINE },
        { 0x1402e0, 2, RI_E2E3_ONLINE },
        { 0x1402e8, 2, RI_E2E3E3B0_ONLINE },
        { 0x1402f0, 9, RI_E2E3_ONLINE },
        { 0x140314, 44, RI_E3B0_ONLINE },
-       { 0x1403d0, 70, RI_E3B0_ONLINE },
        { 0x144000, 4, RI_E1E1H_ONLINE },
        { 0x148000, 4, RI_E1E1H_ONLINE },
        { 0x14c000, 4, RI_E1E1H_ONLINE },
@@ -704,7 +699,6 @@ static const struct reg_addr reg_addrs[] = {
        { 0x180398, 1, RI_E2E3E3B0_ONLINE },
        { 0x1803a0, 5, RI_E2E3E3B0_ONLINE },
        { 0x1803b4, 2, RI_E3E3B0_ONLINE },
-       { 0x180400, 1, RI_ALL_ONLINE },
        { 0x180404, 255, RI_E1E1H_OFFLINE },
        { 0x181000, 4, RI_ALL_ONLINE },
        { 0x181010, 1020, RI_ALL_OFFLINE },
@@ -800,9 +794,9 @@ static const struct reg_addr reg_addrs[] = {
        { 0x1b905c, 1, RI_E3E3B0_ONLINE },
        { 0x1b9064, 1, RI_E3B0_ONLINE },
        { 0x1b9080, 10, RI_E3B0_ONLINE },
-       { 0x1b9400, 14, RI_E2E3E3B0_ONLINE },
-       { 0x1b943c, 19, RI_E2E3E3B0_ONLINE },
-       { 0x1b9490, 10, RI_E2E3E3B0_ONLINE },
+       { 0x1b9400, 14, RI_E2E3E3B0_OFFLINE },
+       { 0x1b943c, 19, RI_E2E3E3B0_OFFLINE },
+       { 0x1b9490, 10, RI_E2E3E3B0_OFFLINE },
        { 0x1c0000, 2, RI_ALL_ONLINE },
        { 0x200000, 65, RI_ALL_ONLINE },
        { 0x20014c, 2, RI_E1HE2E3E3B0_ONLINE },
@@ -814,7 +808,6 @@ static const struct reg_addr reg_addrs[] = {
        { 0x200398, 1, RI_E2E3E3B0_ONLINE },
        { 0x2003a0, 1, RI_E2E3E3B0_ONLINE },
        { 0x2003a8, 2, RI_E2E3E3B0_ONLINE },
-       { 0x200400, 1, RI_ALL_ONLINE },
        { 0x200404, 255, RI_E1E1H_OFFLINE },
        { 0x202000, 4, RI_ALL_ONLINE },
        { 0x202010, 2044, RI_ALL_OFFLINE },
@@ -921,7 +914,6 @@ static const struct reg_addr reg_addrs[] = {
        { 0x280398, 1, RI_E2E3E3B0_ONLINE },
        { 0x2803a0, 1, RI_E2E3E3B0_ONLINE },
        { 0x2803a8, 2, RI_E2E3E3B0_ONLINE },
-       { 0x280400, 1, RI_ALL_ONLINE },
        { 0x280404, 255, RI_E1E1H_OFFLINE },
        { 0x282000, 4, RI_ALL_ONLINE },
        { 0x282010, 2044, RI_ALL_OFFLINE },
@@ -1031,7 +1023,6 @@ static const struct reg_addr reg_addrs[] = {
        { 0x300398, 1, RI_E2E3E3B0_ONLINE },
        { 0x3003a0, 1, RI_E2E3E3B0_ONLINE },
        { 0x3003a8, 2, RI_E2E3E3B0_ONLINE },
-       { 0x300400, 1, RI_ALL_ONLINE },
        { 0x300404, 255, RI_E1E1H_OFFLINE },
        { 0x302000, 4, RI_ALL_ONLINE },
        { 0x302010, 2044, RI_ALL_OFFLINE },
index fc4e0e3885b039f0cb7dff48edade89e211de15b..ebf40cd7aa1050d716683e806eda505050bf1e40 100644 (file)
@@ -775,7 +775,7 @@ static void bnx2x_get_regs(struct net_device *dev,
        struct bnx2x *bp = netdev_priv(dev);
        struct dump_hdr dump_hdr = {0};
 
-       regs->version = 0;
+       regs->version = 1;
        memset(p, 0, regs->len);
 
        if (!netif_running(bp->dev))
@@ -1587,6 +1587,12 @@ static int bnx2x_set_pauseparam(struct net_device *dev,
                        bp->link_params.req_flow_ctrl[cfg_idx] =
                                BNX2X_FLOW_CTRL_AUTO;
                }
+               bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_NONE;
+               if (epause->rx_pause)
+                       bp->link_params.req_fc_auto_adv |= BNX2X_FLOW_CTRL_RX;
+
+               if (epause->tx_pause)
+                       bp->link_params.req_fc_auto_adv |= BNX2X_FLOW_CTRL_TX;
        }
 
        DP(BNX2X_MSG_ETHTOOL,
@@ -2888,11 +2894,9 @@ static void bnx2x_get_channels(struct net_device *dev,
  */
 static void bnx2x_change_num_queues(struct bnx2x *bp, int num_rss)
 {
-       bnx2x_del_all_napi(bp);
        bnx2x_disable_msi(bp);
        BNX2X_NUM_QUEUES(bp) = num_rss + NON_ETH_CONTEXT_USE;
        bnx2x_set_int_mode(bp);
-       bnx2x_add_all_napi(bp);
 }
 
 /**
index f4beb46c4709af8291ed7aecc373d7a72a366f59..b046beb435b2c490f70ef3bc2f67bf3af16bfcb5 100644 (file)
@@ -2667,9 +2667,11 @@ int bnx2x_update_pfc(struct link_params *params,
                return bnx2x_status;
 
        DP(NETIF_MSG_LINK, "About to update PFC in BMAC\n");
-       if (CHIP_IS_E3(bp))
-               bnx2x_update_pfc_xmac(params, vars, 0);
-       else {
+
+       if (CHIP_IS_E3(bp)) {
+               if (vars->mac_type == MAC_TYPE_XMAC)
+                       bnx2x_update_pfc_xmac(params, vars, 0);
+       } else {
                val = REG_RD(bp, MISC_REG_RESET_REG_2);
                if ((val &
                     (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port))
@@ -5432,7 +5434,7 @@ static int bnx2x_get_link_speed_duplex(struct bnx2x_phy *phy,
                switch (speed_mask) {
                case GP_STATUS_10M:
                        vars->line_speed = SPEED_10;
-                       if (vars->duplex == DUPLEX_FULL)
+                       if (is_duplex == DUPLEX_FULL)
                                vars->link_status |= LINK_10TFD;
                        else
                                vars->link_status |= LINK_10THD;
@@ -5440,7 +5442,7 @@ static int bnx2x_get_link_speed_duplex(struct bnx2x_phy *phy,
 
                case GP_STATUS_100M:
                        vars->line_speed = SPEED_100;
-                       if (vars->duplex == DUPLEX_FULL)
+                       if (is_duplex == DUPLEX_FULL)
                                vars->link_status |= LINK_100TXFD;
                        else
                                vars->link_status |= LINK_100TXHD;
@@ -5449,7 +5451,7 @@ static int bnx2x_get_link_speed_duplex(struct bnx2x_phy *phy,
                case GP_STATUS_1G:
                case GP_STATUS_1G_KX:
                        vars->line_speed = SPEED_1000;
-                       if (vars->duplex == DUPLEX_FULL)
+                       if (is_duplex == DUPLEX_FULL)
                                vars->link_status |= LINK_1000TFD;
                        else
                                vars->link_status |= LINK_1000THD;
@@ -5457,7 +5459,7 @@ static int bnx2x_get_link_speed_duplex(struct bnx2x_phy *phy,
 
                case GP_STATUS_2_5G:
                        vars->line_speed = SPEED_2500;
-                       if (vars->duplex == DUPLEX_FULL)
+                       if (is_duplex == DUPLEX_FULL)
                                vars->link_status |= LINK_2500TFD;
                        else
                                vars->link_status |= LINK_2500THD;
@@ -5531,6 +5533,7 @@ static int bnx2x_link_settings_status(struct bnx2x_phy *phy,
 
        if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) {
                if (SINGLE_MEDIA_DIRECT(params)) {
+                       vars->duplex = duplex;
                        bnx2x_flow_ctrl_resolve(phy, params, vars, gp_status);
                        if (phy->req_line_speed == SPEED_AUTO_NEG)
                                bnx2x_xgxs_an_resolve(phy, params, vars,
@@ -5625,6 +5628,7 @@ static int bnx2x_warpcore_read_status(struct bnx2x_phy *phy,
                                        LINK_STATUS_PARALLEL_DETECTION_USED;
                        }
                        bnx2x_ext_phy_resolve_fc(phy, params, vars);
+                       vars->duplex = duplex;
                }
        }
 
index 02b5a343b19506a2edc60be5dd6b41d781aadb85..211753e01f81530324e15c03fa5d0dc4d99b2114 100644 (file)
@@ -7561,8 +7561,14 @@ int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
        }
 
        rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
-       if (rc < 0)
+
+       if (rc == -EEXIST) {
+               DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc);
+               /* do not treat adding same MAC as error */
+               rc = 0;
+       } else if (rc < 0)
                BNX2X_ERR("%s MAC failed\n", (set ? "Set" : "Del"));
+
        return rc;
 }
 
@@ -8427,6 +8433,8 @@ unload_error:
 
        /* Disable HW interrupts, NAPI */
        bnx2x_netif_stop(bp, 1);
+       /* Delete all NAPI objects */
+       bnx2x_del_all_napi(bp);
 
        /* Release IRQs */
        bnx2x_free_irq(bp);
@@ -10292,13 +10300,11 @@ static void __devinit bnx2x_get_fcoe_info(struct bnx2x *bp)
                                dev_info.port_hw_config[port].
                                 fcoe_wwn_node_name_lower);
        } else if (!IS_MF_SD(bp)) {
-               u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
-
                /*
                 * Read the WWN info only if the FCoE feature is enabled for
                 * this function.
                 */
-               if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD)
+               if (BNX2X_MF_EXT_PROTOCOL_FCOE(bp) && !CHIP_IS_E1x(bp))
                        bnx2x_get_ext_wwn_info(bp, func);
 
        } else if (IS_MF_FCOE_SD(bp))
@@ -11071,7 +11077,14 @@ static int bnx2x_set_uc_list(struct bnx2x *bp)
        netdev_for_each_uc_addr(ha, dev) {
                rc = bnx2x_set_mac_one(bp, bnx2x_uc_addr(ha), mac_obj, true,
                                       BNX2X_UC_LIST_MAC, &ramrod_flags);
-               if (rc < 0) {
+               if (rc == -EEXIST) {
+                       DP(BNX2X_MSG_SP,
+                          "Failed to schedule ADD operations: %d\n", rc);
+                       /* do not treat adding same MAC as error */
+                       rc = 0;
+
+               } else if (rc < 0) {
+
                        BNX2X_ERR("Failed to schedule ADD operations: %d\n",
                                  rc);
                        return rc;
@@ -11229,10 +11242,12 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 static void poll_bnx2x(struct net_device *dev)
 {
        struct bnx2x *bp = netdev_priv(dev);
+       int i;
 
-       disable_irq(bp->pdev->irq);
-       bnx2x_interrupt(bp->pdev->irq, dev);
-       enable_irq(bp->pdev->irq);
+       for_each_eth_queue(bp, i) {
+               struct bnx2x_fastpath *fp = &bp->fp[i];
+               napi_schedule(&bnx2x_fp(bp, fp->index, napi));
+       }
 }
 #endif
 
@@ -11899,9 +11914,6 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
         */
        bnx2x_set_int_mode(bp);
 
-       /* Add all NAPI objects */
-       bnx2x_add_all_napi(bp);
-
        rc = register_netdev(dev);
        if (rc) {
                dev_err(&pdev->dev, "Cannot register net device\n");
@@ -11976,9 +11988,6 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
 
        unregister_netdev(dev);
 
-       /* Delete all NAPI objects */
-       bnx2x_del_all_napi(bp);
-
        /* Power on: we can't let PCI layer write to us while we are in D3 */
        bnx2x_set_power_state(bp, PCI_D0);
 
@@ -12025,6 +12034,8 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
        bnx2x_tx_disable(bp);
 
        bnx2x_netif_stop(bp, 0);
+       /* Delete all NAPI objects */
+       bnx2x_del_all_napi(bp);
 
        del_timer_sync(&bp->timer);
 
index 332db64dd5bea11eed0cf878565c5c2e573f5c3e..a1d0446b39b356dd69e0b77e69f6285ba37edb63 100644 (file)
@@ -101,6 +101,11 @@ static void bnx2x_hw_stats_post(struct bnx2x *bp)
        if (CHIP_REV_IS_SLOW(bp))
                return;
 
+       /* Update MCP's statistics if possible */
+       if (bp->func_stx)
+               memcpy(bnx2x_sp(bp, func_stats), &bp->func_stats,
+                      sizeof(bp->func_stats));
+
        /* loader */
        if (bp->executer_idx) {
                int loader_idx = PMF_DMAE_C(bp);
@@ -128,8 +133,6 @@ static void bnx2x_hw_stats_post(struct bnx2x *bp)
 
        } else if (bp->func_stx) {
                *stats_comp = 0;
-               memcpy(bnx2x_sp(bp, func_stats), &bp->func_stats,
-                      sizeof(bp->func_stats));
                bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
        }
 }
@@ -1151,9 +1154,11 @@ static void bnx2x_stats_update(struct bnx2x *bp)
        if (bp->port.pmf)
                bnx2x_hw_stats_update(bp);
 
-       if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
-               BNX2X_ERR("storm stats were not updated for 3 times\n");
-               bnx2x_panic();
+       if (bnx2x_storm_stats_update(bp)) {
+               if (bp->stats_pending++ == 3) {
+                       BNX2X_ERR("storm stats were not updated for 3 times\n");
+                       bnx2x_panic();
+               }
                return;
        }
 
index 845b2020f291cc20ad8223c8165512604314e812..138446957786a9ce0845a7e8221af4887b904ce3 100644 (file)
@@ -1243,6 +1243,7 @@ static void set_multicast_list(struct net_device *dev)
 {
        struct net_local *lp = netdev_priv(dev);
        unsigned long flags;
+       u16 cfg;
 
        spin_lock_irqsave(&lp->lock, flags);
        if (dev->flags & IFF_PROMISC)
@@ -1260,11 +1261,10 @@ static void set_multicast_list(struct net_device *dev)
        /* in promiscuous mode, we accept errored packets,
         * so we have to enable interrupts on them also
         */
-       writereg(dev, PP_RxCFG,
-                (lp->curr_rx_cfg |
-                 (lp->rx_mode == RX_ALL_ACCEPT)
-                 ? (RX_CRC_ERROR_ENBL | RX_RUNT_ENBL | RX_EXTRA_DATA_ENBL)
-                 : 0));
+       cfg = lp->curr_rx_cfg;
+       if (lp->rx_mode == RX_ALL_ACCEPT)
+               cfg |= RX_CRC_ERROR_ENBL | RX_RUNT_ENBL | RX_EXTRA_DATA_ENBL;
+       writereg(dev, PP_RxCFG, cfg);
        spin_unlock_irqrestore(&lp->lock, flags);
 }
 
index 7fac97b4bb59c19ecaca1073d84c6a183ecd17b1..8c63d06ab12b6ccf899fae8fa13f75904beb22c1 100644 (file)
@@ -259,7 +259,7 @@ int be_process_mcc(struct be_adapter *adapter)
        int num = 0, status = 0;
        struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
 
-       spin_lock_bh(&adapter->mcc_cq_lock);
+       spin_lock(&adapter->mcc_cq_lock);
        while ((compl = be_mcc_compl_get(adapter))) {
                if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
                        /* Interpret flags as an async trailer */
@@ -280,7 +280,7 @@ int be_process_mcc(struct be_adapter *adapter)
        if (num)
                be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);
 
-       spin_unlock_bh(&adapter->mcc_cq_lock);
+       spin_unlock(&adapter->mcc_cq_lock);
        return status;
 }
 
@@ -295,7 +295,9 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
                if (be_error(adapter))
                        return -EIO;
 
+               local_bh_disable();
                status = be_process_mcc(adapter);
+               local_bh_enable();
 
                if (atomic_read(&mcc_obj->q.used) == 0)
                        break;
index 90a903d83d8747ca26d711b7b3a188f81a554611..78b8aa8069f03c440ea6037f9dc3e86e81c62fe6 100644 (file)
@@ -3763,7 +3763,9 @@ static void be_worker(struct work_struct *work)
        /* when interrupts are not yet enabled, just reap any pending
        * mcc completions */
        if (!netif_running(adapter->netdev)) {
+               local_bh_disable();
                be_process_mcc(adapter);
+               local_bh_enable();
                goto reschedule;
        }
 
index 4605f7246687d0898ad82e9c769026c741d6d540..d3233f59a82e47b1d70a372c0973e1447d0ef90b 100644 (file)
@@ -1041,7 +1041,7 @@ static int gfar_probe(struct platform_device *ofdev)
 
        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
                dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
-               dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+               dev->features |= NETIF_F_HW_VLAN_RX;
        }
 
        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
index bd1f1ef91e1910f81f454a7332337870fa031331..ba4e0cea3506f80da5cc36a69f22994a7a3e470e 100644 (file)
@@ -139,8 +139,11 @@ struct znet_private {
 /* Only one can be built-in;-> */
 static struct net_device *znet_dev;
 
+#define NETIDBLK_MAGIC         "NETIDBLK"
+#define NETIDBLK_MAGIC_SIZE    8
+
 struct netidblk {
-       char magic[8];          /* The magic number (string) "NETIDBLK" */
+       char magic[NETIDBLK_MAGIC_SIZE];        /* The magic number (string) "NETIDBLK" */
        unsigned char netid[8]; /* The physical station address */
        char nettype, globalopt;
        char vendor[8];         /* The machine vendor and product name. */
@@ -373,14 +376,16 @@ static int __init znet_probe (void)
        struct znet_private *znet;
        struct net_device *dev;
        char *p;
+       char *plast = phys_to_virt(0x100000 - NETIDBLK_MAGIC_SIZE);
        int err = -ENOMEM;
 
        /* This code scans the region 0xf0000 to 0xfffff for a "NETIDBLK". */
-       for(p = (char *)phys_to_virt(0xf0000); p < (char *)phys_to_virt(0x100000); p++)
-               if (*p == 'N'  &&  strncmp(p, "NETIDBLK", 8) == 0)
+       for(p = (char *)phys_to_virt(0xf0000); p <= plast; p++)
+               if (*p == 'N' &&
+                   strncmp(p, NETIDBLK_MAGIC, NETIDBLK_MAGIC_SIZE) == 0)
                        break;
 
-       if (p >= (char *)phys_to_virt(0x100000)) {
+       if (p > plast) {
                if (znet_debug > 1)
                        printk(KERN_INFO "No Z-Note ethernet adaptor found.\n");
                return -ENODEV;
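
A minimal sketch of the bounds arithmetic the znet hunk above introduces: the last address at which the 8-byte "NETIDBLK" compare may start is (end of window - NETIDBLK_MAGIC_SIZE), which is what the new 'plast' pointer expresses. This user-space version scans an ordinary buffer instead of phys_to_virt()'d BIOS memory; the buffer and its contents are illustrative assumptions, not driver code.

#include <stdio.h>
#include <string.h>

#define NETIDBLK_MAGIC		"NETIDBLK"
#define NETIDBLK_MAGIC_SIZE	8

/* Return a pointer to the magic string inside buf[0..len), or NULL.
 * Starting the compare no later than buf + len - NETIDBLK_MAGIC_SIZE
 * guarantees the 8-byte strncmp() never reads past the end of the
 * window, which is the off-by-one the driver fix addresses. */
static const char *find_netidblk(const char *buf, size_t len)
{
	const char *p, *plast;

	if (len < NETIDBLK_MAGIC_SIZE)
		return NULL;
	plast = buf + len - NETIDBLK_MAGIC_SIZE;
	for (p = buf; p <= plast; p++)
		if (*p == 'N' &&
		    strncmp(p, NETIDBLK_MAGIC, NETIDBLK_MAGIC_SIZE) == 0)
			return p;
	return NULL;
}

int main(void)
{
	char window[64] = "xxxxNETIDBLKyyyy";	/* stand-in for the 0xf0000 BIOS window */

	printf("magic %sfound\n", find_netidblk(window, sizeof(window)) ? "" : "not ");
	return 0;
}
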
index 9010cea68bc3094a4b9b18b06cae1aa7d9796cd7..b68d28a130e664e2042bbb9a4335710964f861a7 100644 (file)
@@ -472,14 +472,9 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
        }
 
        if (adapter->rx_queue.queue_addr != NULL) {
-               if (!dma_mapping_error(dev, adapter->rx_queue.queue_dma)) {
-                       dma_unmap_single(dev,
-                                       adapter->rx_queue.queue_dma,
-                                       adapter->rx_queue.queue_len,
-                                       DMA_BIDIRECTIONAL);
-                       adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
-               }
-               kfree(adapter->rx_queue.queue_addr);
+               dma_free_coherent(dev, adapter->rx_queue.queue_len,
+                                 adapter->rx_queue.queue_addr,
+                                 adapter->rx_queue.queue_dma);
                adapter->rx_queue.queue_addr = NULL;
        }
 
@@ -556,10 +551,13 @@ static int ibmveth_open(struct net_device *netdev)
                goto err_out;
        }
 
+       dev = &adapter->vdev->dev;
+
        adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) *
                                                rxq_entries;
-       adapter->rx_queue.queue_addr = kmalloc(adapter->rx_queue.queue_len,
-                                               GFP_KERNEL);
+       adapter->rx_queue.queue_addr =
+           dma_alloc_coherent(dev, adapter->rx_queue.queue_len,
+                              &adapter->rx_queue.queue_dma, GFP_KERNEL);
 
        if (!adapter->rx_queue.queue_addr) {
                netdev_err(netdev, "unable to allocate rx queue pages\n");
@@ -567,19 +565,13 @@ static int ibmveth_open(struct net_device *netdev)
                goto err_out;
        }
 
-       dev = &adapter->vdev->dev;
-
        adapter->buffer_list_dma = dma_map_single(dev,
                        adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
        adapter->filter_list_dma = dma_map_single(dev,
                        adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
-       adapter->rx_queue.queue_dma = dma_map_single(dev,
-                       adapter->rx_queue.queue_addr,
-                       adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);
 
        if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
-           (dma_mapping_error(dev, adapter->filter_list_dma)) ||
-           (dma_mapping_error(dev, adapter->rx_queue.queue_dma))) {
+           (dma_mapping_error(dev, adapter->filter_list_dma))) {
                netdev_err(netdev, "unable to map filter or buffer list "
                           "pages\n");
                rc = -ENOMEM;
index cd153326c3cf8254543b0b544f0dfd8443cd6736..cb3356c9af803509efb2c58ff4f7d0667e689e1c 100644 (file)
@@ -310,6 +310,7 @@ struct e1000_adapter {
         */
        struct e1000_ring *tx_ring /* One per active queue */
                                                ____cacheline_aligned_in_smp;
+       u32 tx_fifo_limit;
 
        struct napi_struct napi;
 
index 46c3b1f9ff8997af685836bb82fb0e1bd5e599cf..d01a099475a143a44253b83ef212d8ad1f5935bf 100644 (file)
@@ -3516,6 +3516,15 @@ void e1000e_reset(struct e1000_adapter *adapter)
                break;
        }
 
+       /*
+        * Alignment of Tx data is on an arbitrary byte boundary with the
+        * maximum size per Tx descriptor limited only to the transmit
+        * allocation of the packet buffer minus 96 bytes with an upper
+        * limit of 24KB due to receive synchronization limitations.
+        */
+       adapter->tx_fifo_limit = min_t(u32, ((er32(PBA) >> 16) << 10) - 96,
+                                      24 << 10);
+
        /*
         * Disable Adaptive Interrupt Moderation if 2 full packets cannot
         * fit in receive buffer.
@@ -4785,12 +4794,9 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
        return 1;
 }
 
-#define E1000_MAX_PER_TXD      8192
-#define E1000_MAX_TXD_PWR      12
-
 static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
                        unsigned int first, unsigned int max_per_txd,
-                       unsigned int nr_frags, unsigned int mss)
+                       unsigned int nr_frags)
 {
        struct e1000_adapter *adapter = tx_ring->adapter;
        struct pci_dev *pdev = adapter->pdev;
@@ -5023,20 +5029,19 @@ static int __e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
 
 static int e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
 {
+       BUG_ON(size > tx_ring->count);
+
        if (e1000_desc_unused(tx_ring) >= size)
                return 0;
        return __e1000_maybe_stop_tx(tx_ring, size);
 }
 
-#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
                                    struct net_device *netdev)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_ring *tx_ring = adapter->tx_ring;
        unsigned int first;
-       unsigned int max_per_txd = E1000_MAX_PER_TXD;
-       unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
        unsigned int tx_flags = 0;
        unsigned int len = skb_headlen(skb);
        unsigned int nr_frags;
@@ -5056,18 +5061,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
        }
 
        mss = skb_shinfo(skb)->gso_size;
-       /*
-        * The controller does a simple calculation to
-        * make sure there is enough room in the FIFO before
-        * initiating the DMA for each buffer.  The calc is:
-        * 4 = ceil(buffer len/mss).  To make sure we don't
-        * overrun the FIFO, adjust the max buffer len if mss
-        * drops.
-        */
        if (mss) {
                u8 hdr_len;
-               max_per_txd = min(mss << 2, max_per_txd);
-               max_txd_pwr = fls(max_per_txd) - 1;
 
                /*
                 * TSO Workaround for 82571/2/3 Controllers -- if skb->data
@@ -5097,12 +5092,12 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
                count++;
        count++;
 
-       count += TXD_USE_COUNT(len, max_txd_pwr);
+       count += DIV_ROUND_UP(len, adapter->tx_fifo_limit);
 
        nr_frags = skb_shinfo(skb)->nr_frags;
        for (f = 0; f < nr_frags; f++)
-               count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
-                                      max_txd_pwr);
+               count += DIV_ROUND_UP(skb_frag_size(&skb_shinfo(skb)->frags[f]),
+                                     adapter->tx_fifo_limit);
 
        if (adapter->hw.mac.tx_pkt_filtering)
                e1000_transfer_dhcp_info(adapter, skb);
@@ -5144,15 +5139,18 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
                tx_flags |= E1000_TX_FLAGS_NO_FCS;
 
        /* if count is 0 then mapping error has occurred */
-       count = e1000_tx_map(tx_ring, skb, first, max_per_txd, nr_frags, mss);
+       count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit,
+                            nr_frags);
        if (count) {
                skb_tx_timestamp(skb);
 
                netdev_sent_queue(netdev, skb->len);
                e1000_tx_queue(tx_ring, tx_flags, count);
                /* Make sure there is space in the ring for the next send. */
-               e1000_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 2);
-
+               e1000_maybe_stop_tx(tx_ring,
+                                   (MAX_SKB_FRAGS *
+                                    DIV_ROUND_UP(PAGE_SIZE,
+                                                 adapter->tx_fifo_limit) + 2));
        } else {
                dev_kfree_skb_any(skb);
                tx_ring->buffer_info[first].time_stamp = 0;
@@ -6327,8 +6325,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
        adapter->hw.phy.autoneg_advertised = 0x2f;
 
        /* ring size defaults */
-       adapter->rx_ring->count = 256;
-       adapter->tx_ring->count = 256;
+       adapter->rx_ring->count = E1000_DEFAULT_RXD;
+       adapter->tx_ring->count = E1000_DEFAULT_TXD;
 
        /*
         * Initial Wake on LAN setting - If APM wake is enabled in
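
A minimal sketch of the descriptor accounting the e1000e hunks above switch to: instead of the old fixed 8 KB power-of-two per descriptor, each buffer is split into tx_fifo_limit-sized chunks and counted with a round-up division. The limit and packet sizes below are illustrative assumptions (the driver derives the limit from the PBA register, capped at 24 KB).

#include <stdio.h>

/* Round-up integer division, as the kernel's DIV_ROUND_UP() does. */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int tx_fifo_limit = 24 * 1024;		/* assumed cap, as in e1000e_reset() */
	unsigned int head_len = 1514;			/* example linear skb length */
	unsigned int frags[] = { 4096, 4096 };		/* example page fragments */
	unsigned int count = 0, f;

	/* One descriptor per tx_fifo_limit-sized chunk of each buffer. */
	count += DIV_ROUND_UP(head_len, tx_fifo_limit);
	for (f = 0; f < sizeof(frags) / sizeof(frags[0]); f++)
		count += DIV_ROUND_UP(frags[f], tx_fifo_limit);

	printf("data descriptors needed: %u\n", count);
	return 0;
}
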
index 827b72dfce99690093f0fe81ee3ccf804d59db85..2f816c6aed72da16bc6afc22c957f0b6c7a5eac0 100644 (file)
@@ -1234,13 +1234,13 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
                                mlx4_info(dev, "non-primary physical function, skipping.\n");
                        else
                                mlx4_err(dev, "QUERY_FW command failed, aborting.\n");
-                       goto unmap_bf;
+                       return err;
                }
 
                err = mlx4_load_fw(dev);
                if (err) {
                        mlx4_err(dev, "Failed to start FW, aborting.\n");
-                       goto unmap_bf;
+                       return err;
                }
 
                mlx4_cfg.log_pg_sz_m = 1;
@@ -1304,7 +1304,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
                err = mlx4_init_slave(dev);
                if (err) {
                        mlx4_err(dev, "Failed to initialize slave\n");
-                       goto unmap_bf;
+                       return err;
                }
 
                err = mlx4_slave_cap(dev);
@@ -1324,7 +1324,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
        err = mlx4_QUERY_ADAPTER(dev, &adapter);
        if (err) {
                mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n");
-               goto err_close;
+               goto unmap_bf;
        }
 
        priv->eq_table.inta_pin = adapter.inta_pin;
@@ -1332,6 +1332,9 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
 
        return 0;
 
+unmap_bf:
+       unmap_bf_area(dev);
+
 err_close:
        mlx4_close_hca(dev);
 
@@ -1344,8 +1347,6 @@ err_stop_fw:
                mlx4_UNMAP_FA(dev);
                mlx4_free_icm(dev, priv->fw.fw_icm, 0);
        }
-unmap_bf:
-       unmap_bf_area(dev);
        return err;
 }
 
@@ -1996,7 +1997,8 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
        }
 
 slave_start:
-       if (mlx4_cmd_init(dev)) {
+       err = mlx4_cmd_init(dev);
+       if (err) {
                mlx4_err(dev, "Failed to init command interface, aborting.\n");
                goto err_sriov;
        }
index a018ea2a43deb9c67e773032e62d8a83f54bb3d9..e151c21baf2baf5970c9c232c79d0c8650c0eb4e 100644 (file)
@@ -137,11 +137,11 @@ static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
        return err;
 }
 
-static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 pf_num,
+static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 port,
                                              enum mlx4_steer_type steer,
                                              u32 qpn)
 {
-       struct mlx4_steer *s_steer = &mlx4_priv(dev)->steer[pf_num];
+       struct mlx4_steer *s_steer = &mlx4_priv(dev)->steer[port - 1];
        struct mlx4_promisc_qp *pqp;
 
        list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
@@ -182,7 +182,7 @@ static int new_steering_entry(struct mlx4_dev *dev, u8 port,
        /* If the given qpn is also a promisc qp,
         * it should be inserted to duplicates list
         */
-       pqp = get_promisc_qp(dev, 0, steer, qpn);
+       pqp = get_promisc_qp(dev, port, steer, qpn);
        if (pqp) {
                dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
                if (!dqp) {
@@ -256,7 +256,7 @@ static int existing_steering_entry(struct mlx4_dev *dev, u8 port,
 
        s_steer = &mlx4_priv(dev)->steer[port - 1];
 
-       pqp = get_promisc_qp(dev, 0, steer, qpn);
+       pqp = get_promisc_qp(dev, port, steer, qpn);
        if (!pqp)
                return 0; /* nothing to do */
 
@@ -302,7 +302,7 @@ static bool check_duplicate_entry(struct mlx4_dev *dev, u8 port,
        s_steer = &mlx4_priv(dev)->steer[port - 1];
 
        /* if qp is not promisc, it cannot be duplicated */
-       if (!get_promisc_qp(dev, 0, steer, qpn))
+       if (!get_promisc_qp(dev, port, steer, qpn))
                return false;
 
        /* The qp is promisc qp so it is a duplicate on this index
@@ -352,7 +352,7 @@ static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port,
        members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
        for (i = 0;  i < members_count; i++) {
                qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK;
-               if (!get_promisc_qp(dev, 0, steer, qpn) && qpn != tqpn) {
+               if (!get_promisc_qp(dev, port, steer, qpn) && qpn != tqpn) {
                        /* the qp is not promisc, the entry can't be removed */
                        goto out;
                }
@@ -398,7 +398,7 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 port,
 
        mutex_lock(&priv->mcg_table.mutex);
 
-       if (get_promisc_qp(dev, 0, steer, qpn)) {
+       if (get_promisc_qp(dev, port, steer, qpn)) {
                err = 0;  /* Nothing to do, already exists */
                goto out_mutex;
        }
@@ -503,7 +503,7 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
        s_steer = &mlx4_priv(dev)->steer[port - 1];
        mutex_lock(&priv->mcg_table.mutex);
 
-       pqp = get_promisc_qp(dev, 0, steer, qpn);
+       pqp = get_promisc_qp(dev, port, steer, qpn);
        if (unlikely(!pqp)) {
                mlx4_warn(dev, "QP %x is not promiscuous QP\n", qpn);
                /* nothing to do */
@@ -650,13 +650,6 @@ static int find_entry(struct mlx4_dev *dev, u8 port,
        return err;
 }
 
-struct mlx4_net_trans_rule_hw_ctrl {
-       __be32 ctrl;
-       __be32 vf_vep_port;
-       __be32 qpn;
-       __be32 reserved;
-};
-
 static void trans_rule_ctrl_to_hw(struct mlx4_net_trans_rule *ctrl,
                                  struct mlx4_net_trans_rule_hw_ctrl *hw)
 {
@@ -680,87 +673,18 @@ static void trans_rule_ctrl_to_hw(struct mlx4_net_trans_rule *ctrl,
        hw->qpn = cpu_to_be32(ctrl->qpn);
 }
 
-struct mlx4_net_trans_rule_hw_ib {
-       u8      size;
-       u8      rsvd1;
-       __be16  id;
-       u32     rsvd2;
-       __be32  qpn;
-       __be32  qpn_mask;
-       u8      dst_gid[16];
-       u8      dst_gid_msk[16];
-} __packed;
-
-struct mlx4_net_trans_rule_hw_eth {
-       u8      size;
-       u8      rsvd;
-       __be16  id;
-       u8      rsvd1[6];
-       u8      dst_mac[6];
-       u16     rsvd2;
-       u8      dst_mac_msk[6];
-       u16     rsvd3;
-       u8      src_mac[6];
-       u16     rsvd4;
-       u8      src_mac_msk[6];
-       u8      rsvd5;
-       u8      ether_type_enable;
-       __be16  ether_type;
-       __be16  vlan_id_msk;
-       __be16  vlan_id;
-} __packed;
-
-struct mlx4_net_trans_rule_hw_tcp_udp {
-       u8      size;
-       u8      rsvd;
-       __be16  id;
-       __be16  rsvd1[3];
-       __be16  dst_port;
-       __be16  rsvd2;
-       __be16  dst_port_msk;
-       __be16  rsvd3;
-       __be16  src_port;
-       __be16  rsvd4;
-       __be16  src_port_msk;
-} __packed;
-
-struct mlx4_net_trans_rule_hw_ipv4 {
-       u8      size;
-       u8      rsvd;
-       __be16  id;
-       __be32  rsvd1;
-       __be32  dst_ip;
-       __be32  dst_ip_msk;
-       __be32  src_ip;
-       __be32  src_ip_msk;
-} __packed;
-
-struct _rule_hw {
-       union {
-               struct {
-                       u8 size;
-                       u8 rsvd;
-                       __be16 id;
-               };
-               struct mlx4_net_trans_rule_hw_eth eth;
-               struct mlx4_net_trans_rule_hw_ib ib;
-               struct mlx4_net_trans_rule_hw_ipv4 ipv4;
-               struct mlx4_net_trans_rule_hw_tcp_udp tcp_udp;
-       };
+const u16 __sw_id_hw[] = {
+       [MLX4_NET_TRANS_RULE_ID_ETH]     = 0xE001,
+       [MLX4_NET_TRANS_RULE_ID_IB]      = 0xE005,
+       [MLX4_NET_TRANS_RULE_ID_IPV6]    = 0xE003,
+       [MLX4_NET_TRANS_RULE_ID_IPV4]    = 0xE002,
+       [MLX4_NET_TRANS_RULE_ID_TCP]     = 0xE004,
+       [MLX4_NET_TRANS_RULE_ID_UDP]     = 0xE006
 };
 
 static int parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec,
                            struct _rule_hw *rule_hw)
 {
-       static const u16 __sw_id_hw[] = {
-               [MLX4_NET_TRANS_RULE_ID_ETH]     = 0xE001,
-               [MLX4_NET_TRANS_RULE_ID_IB]      = 0xE005,
-               [MLX4_NET_TRANS_RULE_ID_IPV6]    = 0xE003,
-               [MLX4_NET_TRANS_RULE_ID_IPV4]    = 0xE002,
-               [MLX4_NET_TRANS_RULE_ID_TCP]     = 0xE004,
-               [MLX4_NET_TRANS_RULE_ID_UDP]     = 0xE006
-       };
-
        static const size_t __rule_hw_sz[] = {
                [MLX4_NET_TRANS_RULE_ID_ETH] =
                        sizeof(struct mlx4_net_trans_rule_hw_eth),
index 4d9df8f2a12617047355fc9988d5187af80a95f5..dba69d98734a29b9a10038e22eff8e93714147ed 100644 (file)
@@ -690,6 +690,82 @@ struct mlx4_steer {
        struct list_head steer_entries[MLX4_NUM_STEERS];
 };
 
+struct mlx4_net_trans_rule_hw_ctrl {
+       __be32 ctrl;
+       __be32 vf_vep_port;
+       __be32 qpn;
+       __be32 reserved;
+};
+
+struct mlx4_net_trans_rule_hw_ib {
+       u8 size;
+       u8 rsvd1;
+       __be16 id;
+       u32 rsvd2;
+       __be32 qpn;
+       __be32 qpn_mask;
+       u8 dst_gid[16];
+       u8 dst_gid_msk[16];
+} __packed;
+
+struct mlx4_net_trans_rule_hw_eth {
+       u8      size;
+       u8      rsvd;
+       __be16  id;
+       u8      rsvd1[6];
+       u8      dst_mac[6];
+       u16     rsvd2;
+       u8      dst_mac_msk[6];
+       u16     rsvd3;
+       u8      src_mac[6];
+       u16     rsvd4;
+       u8      src_mac_msk[6];
+       u8      rsvd5;
+       u8      ether_type_enable;
+       __be16  ether_type;
+       __be16  vlan_id_msk;
+       __be16  vlan_id;
+} __packed;
+
+struct mlx4_net_trans_rule_hw_tcp_udp {
+       u8      size;
+       u8      rsvd;
+       __be16  id;
+       __be16  rsvd1[3];
+       __be16  dst_port;
+       __be16  rsvd2;
+       __be16  dst_port_msk;
+       __be16  rsvd3;
+       __be16  src_port;
+       __be16  rsvd4;
+       __be16  src_port_msk;
+} __packed;
+
+struct mlx4_net_trans_rule_hw_ipv4 {
+       u8      size;
+       u8      rsvd;
+       __be16  id;
+       __be32  rsvd1;
+       __be32  dst_ip;
+       __be32  dst_ip_msk;
+       __be32  src_ip;
+       __be32  src_ip_msk;
+} __packed;
+
+struct _rule_hw {
+       union {
+               struct {
+                       u8 size;
+                       u8 rsvd;
+                       __be16 id;
+               };
+               struct mlx4_net_trans_rule_hw_eth eth;
+               struct mlx4_net_trans_rule_hw_ib ib;
+               struct mlx4_net_trans_rule_hw_ipv4 ipv4;
+               struct mlx4_net_trans_rule_hw_tcp_udp tcp_udp;
+       };
+};
+
 struct mlx4_priv {
        struct mlx4_dev         dev;
 
index 94ceddd17ab28a3ea13a15ea4c7ec1211042278f..293c9e820c49b5d470dce7eda95f2252b94d4251 100644 (file)
@@ -42,6 +42,7 @@
 #include <linux/mlx4/cmd.h>
 #include <linux/mlx4/qp.h>
 #include <linux/if_ether.h>
+#include <linux/etherdevice.h>
 
 #include "mlx4.h"
 #include "fw.h"
@@ -2776,18 +2777,133 @@ ex_put:
        return err;
 }
 
+/*
+ * MAC validation for Flow Steering rules.
+ * VF can attach rules only with a mac address which is assigned to it.
+ */
+static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
+                                  struct list_head *rlist)
+{
+       struct mac_res *res, *tmp;
+       __be64 be_mac;
+
+       /* make sure it isn't multicast or broadcast mac */
+       if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
+           !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
+               list_for_each_entry_safe(res, tmp, rlist, list) {
+                       be_mac = cpu_to_be64(res->mac << 16);
+                       if (!memcmp(&be_mac, eth_header->eth.dst_mac, ETH_ALEN))
+                               return 0;
+               }
+               pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
+                      eth_header->eth.dst_mac, slave);
+               return -EINVAL;
+       }
+       return 0;
+}
+
+/*
+ * In case of missing eth header, append eth header with a MAC address
+ * assigned to the VF.
+ */
+static int add_eth_header(struct mlx4_dev *dev, int slave,
+                         struct mlx4_cmd_mailbox *inbox,
+                         struct list_head *rlist, int header_id)
+{
+       struct mac_res *res, *tmp;
+       u8 port;
+       struct mlx4_net_trans_rule_hw_ctrl *ctrl;
+       struct mlx4_net_trans_rule_hw_eth *eth_header;
+       struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
+       struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
+       __be64 be_mac = 0;
+       __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
+
+       ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
+       port = be32_to_cpu(ctrl->vf_vep_port) & 0xff;
+       eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
+
+       /* Clear a space in the inbox for eth header */
+       switch (header_id) {
+       case MLX4_NET_TRANS_RULE_ID_IPV4:
+               ip_header =
+                       (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
+               memmove(ip_header, eth_header,
+                       sizeof(*ip_header) + sizeof(*l4_header));
+               break;
+       case MLX4_NET_TRANS_RULE_ID_TCP:
+       case MLX4_NET_TRANS_RULE_ID_UDP:
+               l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
+                           (eth_header + 1);
+               memmove(l4_header, eth_header, sizeof(*l4_header));
+               break;
+       default:
+               return -EINVAL;
+       }
+       list_for_each_entry_safe(res, tmp, rlist, list) {
+               if (port == res->port) {
+                       be_mac = cpu_to_be64(res->mac << 16);
+                       break;
+               }
+       }
+       if (!be_mac) {
+               pr_err("Failed adding eth header to FS rule, can't find matching MAC for port %d.\n",
+                      port);
+               return -EINVAL;
+       }
+
+       memset(eth_header, 0, sizeof(*eth_header));
+       eth_header->size = sizeof(*eth_header) >> 2;
+       eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
+       memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
+       memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
+
+       return 0;
+
+}
+
 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
                                         struct mlx4_vhcr *vhcr,
                                         struct mlx4_cmd_mailbox *inbox,
                                         struct mlx4_cmd_mailbox *outbox,
                                         struct mlx4_cmd_info *cmd)
 {
+
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+       struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
        int err;
+       struct mlx4_net_trans_rule_hw_ctrl *ctrl;
+       struct _rule_hw  *rule_header;
+       int header_id;
 
        if (dev->caps.steering_mode !=
            MLX4_STEERING_MODE_DEVICE_MANAGED)
                return -EOPNOTSUPP;
 
+       ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
+       rule_header = (struct _rule_hw *)(ctrl + 1);
+       header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
+
+       switch (header_id) {
+       case MLX4_NET_TRANS_RULE_ID_ETH:
+               if (validate_eth_header_mac(slave, rule_header, rlist))
+                       return -EINVAL;
+               break;
+       case MLX4_NET_TRANS_RULE_ID_IPV4:
+       case MLX4_NET_TRANS_RULE_ID_TCP:
+       case MLX4_NET_TRANS_RULE_ID_UDP:
+               pr_warn("Can't attach FS rule without L2 headers, adding L2 header.\n");
+               if (add_eth_header(dev, slave, inbox, rlist, header_id))
+                       return -EINVAL;
+               vhcr->in_modifier +=
+                       sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
+               break;
+       default:
+               pr_err("Corrupted mailbox.\n");
+               return -EINVAL;
+       }
+
        err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
                           vhcr->in_modifier, 0,
                           MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
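
A minimal sketch of the unicast MAC-ownership test that validate_eth_header_mac() applies above: the rule's destination MAC is accepted only if it matches a MAC recorded for that VF, compared byte-for-byte in big-endian order (the driver builds this with cpu_to_be64(mac << 16); multicast and broadcast destinations bypass the check). The addresses below are illustrative assumptions.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Expand a MAC held in the low 48 bits of a u64 into wire-order bytes,
 * the same layout a 6-byte memcmp() against cpu_to_be64(mac << 16) sees. */
static void mac_u64_to_bytes(uint64_t mac, uint8_t out[6])
{
	int i;

	for (i = 0; i < 6; i++)
		out[i] = (mac >> (8 * (5 - i))) & 0xff;
}

static int vf_owns_mac(const uint8_t dst_mac[6], const uint64_t *owned, size_t n)
{
	uint8_t bytes[6];
	size_t i;

	for (i = 0; i < n; i++) {
		mac_u64_to_bytes(owned[i], bytes);
		if (memcmp(bytes, dst_mac, 6) == 0)
			return 1;
	}
	return 0;
}

int main(void)
{
	uint64_t owned[] = { 0x001122334455ULL };	/* assumed MAC assigned to the VF */
	uint8_t rule_mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	printf("steering rule %s\n",
	       vf_owns_mac(rule_mac, owned, 1) ? "accepted" : "rejected");
	return 0;
}
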
index bb8c8222122b920511f729463a04b97395724c7d..4d15bf413bdc89f060964c8114d4486be337ce46 100644 (file)
@@ -751,6 +751,7 @@ static int __devinit sgiseeq_probe(struct platform_device *pdev)
        sp->srings = sr;
        sp->rx_desc = sp->srings->rxvector;
        sp->tx_desc = sp->srings->txvector;
+       spin_lock_init(&sp->tx_lock);
 
        /* A couple calculations now, saves many cycles later. */
        setup_rx_ring(dev, sp->rx_desc, SEEQ_RX_BUFFERS);
index 8cba2df82b18b9f2dfa1ff82eee3b22ef9eeb0c9..5faedd855b779272342b37c6d992a1a93562a70b 100644 (file)
@@ -863,8 +863,8 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
                                       &ip_entry->ip4dst, &ip_entry->pdst);
        if (rc != 0) {
                rc = efx_filter_get_ipv4_full(
-                       &spec, &proto, &ip_entry->ip4src, &ip_entry->psrc,
-                       &ip_entry->ip4dst, &ip_entry->pdst);
+                       &spec, &proto, &ip_entry->ip4dst, &ip_entry->pdst,
+                       &ip_entry->ip4src, &ip_entry->psrc);
                EFX_WARN_ON_PARANOID(rc);
                ip_mask->ip4src = ~0;
                ip_mask->psrc = ~0;
index e2d083228f3a6b6b6039ab3276182d3adbc72869..719be3912aa9ca5cb7b0a527811df460c257561b 100644 (file)
@@ -22,6 +22,9 @@
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
 
+#ifndef __COMMON_H__
+#define __COMMON_H__
+
 #include <linux/etherdevice.h>
 #include <linux/netdevice.h>
 #include <linux/phy.h>
@@ -366,3 +369,5 @@ extern void stmmac_set_mac(void __iomem *ioaddr, bool enable);
 
 extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
 extern const struct stmmac_ring_mode_ops ring_mode_ops;
+
+#endif /* __COMMON_H__ */
index 9820ec842cc01f8a7fb0897d993d0b873902dee8..223adf95fd0374e465447adc52d854491721fe4f 100644 (file)
 
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
+
+#ifndef __DESCS_H__
+#define __DESCS_H__
+
 struct dma_desc {
        /* Receive descriptor */
        union {
@@ -166,3 +170,5 @@ enum tdes_csum_insertion {
                                         * is not calculated */
        cic_full = 3,           /* IP header and pseudoheader */
 };
+
+#endif /* __DESCS_H__ */
index dd8d6e19dff6b2c1182dfb0f31ca8c4696c84931..7ee9499a6e385a77b9b2110b9578750e767745ab 100644 (file)
@@ -27,6 +27,9 @@
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
 
+#ifndef __DESC_COM_H__
+#define __DESC_COM_H__
+
 #if defined(CONFIG_STMMAC_RING)
 static inline void ehn_desc_rx_set_on_ring_chain(struct dma_desc *p, int end)
 {
@@ -124,3 +127,5 @@ static inline void norm_set_tx_desc_len(struct dma_desc *p, int len)
        p->des01.tx.buffer1_size = len;
 }
 #endif
+
+#endif /* __DESC_COM_H__ */
index 7c6d857a9cc7f0c357bb7b1d0cb9d417ecbc25a5..2ec6aeae349e5f7dd3b4e873d1366037a943cb27 100644 (file)
@@ -22,6 +22,9 @@
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
 
+#ifndef __DWMAC100_H__
+#define __DWMAC100_H__
+
 #include <linux/phy.h>
 #include "common.h"
 
@@ -119,3 +122,5 @@ enum ttc_control {
 #define DMA_MISSED_FRAME_M_CNTR        0x0000ffff      /* Missed Frame Counter */
 
 extern const struct stmmac_dma_ops dwmac100_dma_ops;
+
+#endif /* __DWMAC100_H__ */
index f90fcb5f957351b742df584104eade1df609bb8b..0e4cacedc1f0e0cf44fad3bb84c5bac000f8e6ee 100644 (file)
@@ -19,6 +19,8 @@
 
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
+#ifndef __DWMAC1000_H__
+#define __DWMAC1000_H__
 
 #include <linux/phy.h>
 #include "common.h"
@@ -229,6 +231,7 @@ enum rtc_control {
 #define GMAC_MMC_RX_CSUM_OFFLOAD   0x208
 
 /* Synopsys Core versions */
-#define        DWMAC_CORE_3_40 34
+#define        DWMAC_CORE_3_40 0x34
 
 extern const struct stmmac_dma_ops dwmac1000_dma_ops;
+#endif /* __DWMAC1000_H__ */
index e678ce39d0146b707c012bb7bbd40a58361855c2..e49c9a0fd6ffe9a8ad52f19134e2e8c1aa7dcd2e 100644 (file)
@@ -22,6 +22,9 @@
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
 
+#ifndef __DWMAC_DMA_H__
+#define __DWMAC_DMA_H__
+
 /* DMA CRS Control and Status Register Mapping */
 #define DMA_BUS_MODE           0x00001000      /* Bus Mode */
 #define DMA_XMT_POLL_DEMAND    0x00001004      /* Transmit Poll Demand */
@@ -109,3 +112,5 @@ extern void dwmac_dma_start_rx(void __iomem *ioaddr);
 extern void dwmac_dma_stop_rx(void __iomem *ioaddr);
 extern int dwmac_dma_interrupt(void __iomem *ioaddr,
                                struct stmmac_extra_stats *x);
+
+#endif /* __DWMAC_DMA_H__ */
index a38352024cb8fec7897fc4a990972658efb35dfb..67995ef252515f53c6796ecf77bbe66460223944 100644 (file)
@@ -22,6 +22,9 @@
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
 
+#ifndef __MMC_H__
+#define __MMC_H__
+
 /* MMC control register */
 /* When set, all counter are reset */
 #define MMC_CNTRL_COUNTER_RESET                0x1
@@ -129,3 +132,5 @@ struct stmmac_counters {
 extern void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode);
 extern void dwmac_mmc_intr_all_mask(void __iomem *ioaddr);
 extern void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc);
+
+#endif /* __MMC_H__ */
index c07cfe989f6ef35716c59d3b5e29cdeefa4f1f2a..0c74a702d4610597da59200be253306c7b4db534 100644 (file)
@@ -33,7 +33,7 @@
 #define MMC_TX_INTR            0x00000108      /* MMC TX Interrupt */
 #define MMC_RX_INTR_MASK       0x0000010c      /* MMC Interrupt Mask */
 #define MMC_TX_INTR_MASK       0x00000110      /* MMC Interrupt Mask */
-#define MMC_DEFAUL_MASK                0xffffffff
+#define MMC_DEFAULT_MASK               0xffffffff
 
 /* MMC TX counter registers */
 
@@ -147,8 +147,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
 /* To mask all interrupts. */
 void dwmac_mmc_intr_all_mask(void __iomem *ioaddr)
 {
-       writel(MMC_DEFAUL_MASK, ioaddr + MMC_RX_INTR_MASK);
-       writel(MMC_DEFAUL_MASK, ioaddr + MMC_TX_INTR_MASK);
+       writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_INTR_MASK);
+       writel(MMC_DEFAULT_MASK, ioaddr + MMC_TX_INTR_MASK);
 }
 
 /* This reads the MAC core counters (if actually supported).
index f2d3665430ad455b82552d975d9f91066a5fc913..e872e1da3137cef68265e845188bb8544acb988e 100644 (file)
@@ -20,6 +20,9 @@
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
 
+#ifndef __STMMAC_H__
+#define __STMMAC_H__
+
 #define STMMAC_RESOURCE_NAME   "stmmaceth"
 #define DRV_MODULE_VERSION     "March_2012"
 
@@ -166,3 +169,5 @@ static inline void stmmac_unregister_pci(void)
 {
 }
 #endif /* CONFIG_STMMAC_PCI */
+
+#endif /* __STMMAC_H__ */
index 6863590d184bcc9c335da8bfb51562eafafc7557..aea9b14cdfbeff126a962e766ca97894284e51b6 100644 (file)
@@ -21,6 +21,8 @@
 
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
+#ifndef __STMMAC_TIMER_H__
+#define __STMMAC_TIMER_H__
 
 struct stmmac_timer {
        void (*timer_start) (unsigned int new_freq);
@@ -40,3 +42,5 @@ void stmmac_schedule(struct net_device *dev);
 extern int tmu2_register_user(void *fnt, void *data);
 extern void tmu2_unregister_user(void);
 #endif
+
+#endif /* __STMMAC_TIMER_H__ */
index cd7ee204e94a10abaa88b3288dcd2cace5ceca3e..a9ca4a03d31b2fe2adc68818da636edb8d5aeb2b 100644 (file)
@@ -394,8 +394,10 @@ static int __devexit davinci_mdio_remove(struct platform_device *pdev)
        struct device *dev = &pdev->dev;
        struct davinci_mdio_data *data = dev_get_drvdata(dev);
 
-       if (data->bus)
+       if (data->bus) {
+               mdiobus_unregister(data->bus);
                mdiobus_free(data->bus);
+       }
 
        if (data->clk)
                clk_put(data->clk);
index 24d8566cfd8b98cdd50540b09b19c9287757e914..441b4dc79450c1009151692565586c53060e5c83 100644 (file)
@@ -673,7 +673,7 @@ void smt_add_para(struct s_smc *smc, struct s_pcon *pcon, u_short para,
                        sm_pm_get_ls(smc,port_to_mib(smc,port))) ;
                break ;
        case SMT_P_REASON :
-               * (u_long *) to = 0 ;
+               *(u32 *)to = 0 ;
                sp_len = 4 ;
                goto sp_done ;
        case SMT_P1033 :                        /* time stamp */
index 328397c66730cae37a9c1cdc02cdb7dde523e2b3..b1ba68f1a049202dac23f6fc4b9f944f8526cb71 100644 (file)
@@ -297,7 +297,7 @@ static int qmi_wwan_suspend(struct usb_interface *intf, pm_message_t message)
        if (ret < 0)
                goto err;
 
-       if (info->subdriver && info->subdriver->suspend)
+       if (intf == info->control && info->subdriver && info->subdriver->suspend)
                ret = info->subdriver->suspend(intf, message);
        if (ret < 0)
                usbnet_resume(intf);
@@ -310,13 +310,14 @@ static int qmi_wwan_resume(struct usb_interface *intf)
        struct usbnet *dev = usb_get_intfdata(intf);
        struct qmi_wwan_state *info = (void *)&dev->data;
        int ret = 0;
+       bool callsub = (intf == info->control && info->subdriver && info->subdriver->resume);
 
-       if (info->subdriver && info->subdriver->resume)
+       if (callsub)
                ret = info->subdriver->resume(intf);
        if (ret < 0)
                goto err;
        ret = usbnet_resume(intf);
-       if (ret < 0 && info->subdriver && info->subdriver->resume && info->subdriver->suspend)
+       if (ret < 0 && callsub && info->subdriver->suspend)
                info->subdriver->suspend(intf, PMSG_SUSPEND);
 err:
        return ret;
@@ -398,7 +399,6 @@ static const struct usb_device_id products[] = {
        /* 4. Gobi 1000 devices */
        {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},    /* Acer Gobi Modem Device */
        {QMI_GOBI1K_DEVICE(0x03f0, 0x1f1d)},    /* HP un2400 Gobi Modem Device */
-       {QMI_GOBI1K_DEVICE(0x03f0, 0x371d)},    /* HP un2430 Mobile Broadband Module */
        {QMI_GOBI1K_DEVICE(0x04da, 0x250d)},    /* Panasonic Gobi Modem device */
        {QMI_GOBI1K_DEVICE(0x413c, 0x8172)},    /* Dell Gobi Modem device */
        {QMI_GOBI1K_DEVICE(0x1410, 0xa001)},    /* Novatel Gobi Modem device */
@@ -413,7 +413,9 @@ static const struct usb_device_id products[] = {
 
        /* 5. Gobi 2000 and 3000 devices */
        {QMI_GOBI_DEVICE(0x413c, 0x8186)},      /* Dell Gobi 2000 Modem device (N0218, VU936) */
+       {QMI_GOBI_DEVICE(0x413c, 0x8194)},      /* Dell Gobi 3000 Composite */
        {QMI_GOBI_DEVICE(0x05c6, 0x920b)},      /* Generic Gobi 2000 Modem device */
+       {QMI_GOBI_DEVICE(0x05c6, 0x920d)},      /* Gobi 3000 Composite */
        {QMI_GOBI_DEVICE(0x05c6, 0x9225)},      /* Sony Gobi 2000 Modem device (N0279, VU730) */
        {QMI_GOBI_DEVICE(0x05c6, 0x9245)},      /* Samsung Gobi 2000 Modem device (VL176) */
        {QMI_GOBI_DEVICE(0x03f0, 0x251d)},      /* HP Gobi 2000 Modem device (VP412) */
@@ -438,9 +440,12 @@ static const struct usb_device_id products[] = {
        {QMI_GOBI_DEVICE(0x16d8, 0x8002)},      /* CMDTech Gobi 2000 Modem device (VU922) */
        {QMI_GOBI_DEVICE(0x05c6, 0x9205)},      /* Gobi 2000 Modem device */
        {QMI_GOBI_DEVICE(0x1199, 0x9013)},      /* Sierra Wireless Gobi 3000 Modem device (MC8355) */
+       {QMI_GOBI_DEVICE(0x03f0, 0x371d)},      /* HP un2430 Mobile Broadband Module */
        {QMI_GOBI_DEVICE(0x1199, 0x9015)},      /* Sierra Wireless Gobi 3000 Modem device */
        {QMI_GOBI_DEVICE(0x1199, 0x9019)},      /* Sierra Wireless Gobi 3000 Modem device */
        {QMI_GOBI_DEVICE(0x1199, 0x901b)},      /* Sierra Wireless MC7770 */
+       {QMI_GOBI_DEVICE(0x12d1, 0x14f1)},      /* Sony Gobi 3000 Composite */
+       {QMI_GOBI_DEVICE(0x1410, 0xa021)},      /* Foxconn Gobi 3000 Modem device (Novatel E396) */
 
        { }                                     /* END */
 };
index 7be49ea60b6d8f9353720d757f47c2c61b16fc60..8e22417fa6c11b5d41845bda1ff4c84e9d369cda 100644 (file)
@@ -656,7 +656,7 @@ static int sierra_net_get_fw_attr(struct usbnet *dev, u16 *datap)
                return -EIO;
        }
 
-       *datap = *attrdata;
+       *datap = le16_to_cpu(*attrdata);
 
        kfree(attrdata);
        return result;
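
A minimal sketch of why the one-line sierra_net change above matters: the firmware attribute arrives as a little-endian 16-bit value, so it has to be converted to host order before use; a raw dereference only happens to work on little-endian hosts. The wire bytes below are an illustrative assumption.

#include <stdio.h>
#include <stdint.h>

/* Read a little-endian u16 into host order regardless of host
 * endianness -- the portable behaviour le16_to_cpu() provides. */
static uint16_t read_le16(const uint8_t *p)
{
	return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
}

int main(void)
{
	uint8_t wire[2] = { 0x34, 0x12 };	/* device sends 0x1234, LSB first */

	printf("fw attribute = 0x%04x\n", read_le16(wire));
	return 0;
}
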
index 8531c1caac283263febc4f674daedef5b7844ff1..fc9f578a1e253a781b9406b1e4a43ed2de2688d1 100644 (file)
@@ -1201,19 +1201,26 @@ deferred:
 }
 EXPORT_SYMBOL_GPL(usbnet_start_xmit);
 
-static void rx_alloc_submit(struct usbnet *dev, gfp_t flags)
+static int rx_alloc_submit(struct usbnet *dev, gfp_t flags)
 {
        struct urb      *urb;
        int             i;
+       int             ret = 0;
 
        /* don't refill the queue all at once */
        for (i = 0; i < 10 && dev->rxq.qlen < RX_QLEN(dev); i++) {
                urb = usb_alloc_urb(0, flags);
                if (urb != NULL) {
-                       if (rx_submit(dev, urb, flags) == -ENOLINK)
-                               return;
+                       ret = rx_submit(dev, urb, flags);
+                       if (ret)
+                               goto err;
+               } else {
+                       ret = -ENOMEM;
+                       goto err;
                }
        }
+err:
+       return ret;
 }
 
 /*-------------------------------------------------------------------------*/
@@ -1257,7 +1264,8 @@ static void usbnet_bh (unsigned long param)
                int     temp = dev->rxq.qlen;
 
                if (temp < RX_QLEN(dev)) {
-                       rx_alloc_submit(dev, GFP_ATOMIC);
+                       if (rx_alloc_submit(dev, GFP_ATOMIC) == -ENOLINK)
+                               return;
                        if (temp != dev->rxq.qlen)
                                netif_dbg(dev, link, dev->net,
                                          "rxqlen %d --> %d\n",
@@ -1573,7 +1581,7 @@ int usbnet_resume (struct usb_interface *intf)
                                netif_device_present(dev->net) &&
                                !timer_pending(&dev->delay) &&
                                !test_bit(EVENT_RX_HALT, &dev->flags))
-                                       rx_alloc_submit(dev, GFP_KERNEL);
+                                       rx_alloc_submit(dev, GFP_NOIO);
 
                        if (!(dev->txq.qlen >= TX_QLEN(dev)))
                                netif_tx_wake_all_queues(dev->net);
index aaaca9aa2293dcf4fc991cb1645056ebfa3d0da1..3f575afd8cfcb03f283df0932f6fb33bb1495cca 100644 (file)
@@ -10,6 +10,7 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/module.h>
 #include <linux/bitops.h>
 #include <linux/cdev.h>
 #include <linux/dma-mapping.h>
index 4026c906cc7b45745e3a80dbc5edc9360413f78f..b7e0258887e70cf9be193c70e73334e9e8fe0473 100644 (file)
@@ -1482,7 +1482,7 @@ ath5k_eeprom_read_target_rate_pwr_info(struct ath5k_hw *ah, unsigned int mode)
        case AR5K_EEPROM_MODE_11A:
                offset += AR5K_EEPROM_TARGET_PWR_OFF_11A(ee->ee_version);
                rate_pcal_info = ee->ee_rate_tpwr_a;
-               ee->ee_rate_target_pwr_num[mode] = AR5K_EEPROM_N_5GHZ_CHAN;
+               ee->ee_rate_target_pwr_num[mode] = AR5K_EEPROM_N_5GHZ_RATE_CHAN;
                break;
        case AR5K_EEPROM_MODE_11B:
                offset += AR5K_EEPROM_TARGET_PWR_OFF_11B(ee->ee_version);
index dc2bcfeadeb45b121de183bd558045a69dda47ba..94a9bbea6874c0daf10510e26fa2862f8ebbfa81 100644 (file)
 #define AR5K_EEPROM_EEP_DELTA          10
 #define AR5K_EEPROM_N_MODES            3
 #define AR5K_EEPROM_N_5GHZ_CHAN                10
+#define AR5K_EEPROM_N_5GHZ_RATE_CHAN   8
 #define AR5K_EEPROM_N_2GHZ_CHAN                3
 #define AR5K_EEPROM_N_2GHZ_CHAN_2413   4
 #define        AR5K_EEPROM_N_2GHZ_CHAN_MAX     4
index 2c9f7d7ed4cc2557a86cb5256afc213a4c16f5c5..0ed3846f9cbb36e8aa1f67eab09dc9e5f219b0c8 100644 (file)
@@ -142,6 +142,7 @@ static int ar9003_paprd_setup_single_table(struct ath_hw *ah)
        };
        int training_power;
        int i, val;
+       u32 am2pm_mask = ah->paprd_ratemask;
 
        if (IS_CHAN_2GHZ(ah->curchan))
                training_power = ar9003_get_training_power_2g(ah);
@@ -158,10 +159,13 @@ static int ar9003_paprd_setup_single_table(struct ath_hw *ah)
        }
        ah->paprd_training_power = training_power;
 
+       if (AR_SREV_9330(ah))
+               am2pm_mask = 0;
+
        REG_RMW_FIELD(ah, AR_PHY_PAPRD_AM2AM, AR_PHY_PAPRD_AM2AM_MASK,
                      ah->paprd_ratemask);
        REG_RMW_FIELD(ah, AR_PHY_PAPRD_AM2PM, AR_PHY_PAPRD_AM2PM_MASK,
-                     ah->paprd_ratemask);
+                     am2pm_mask);
        REG_RMW_FIELD(ah, AR_PHY_PAPRD_HT40, AR_PHY_PAPRD_HT40_MASK,
                      ah->paprd_ratemask_ht40);
 
@@ -782,6 +786,102 @@ int ar9003_paprd_setup_gain_table(struct ath_hw *ah, int chain)
 }
 EXPORT_SYMBOL(ar9003_paprd_setup_gain_table);
 
+static bool ar9003_paprd_retrain_pa_in(struct ath_hw *ah,
+                                      struct ath9k_hw_cal_data *caldata,
+                                      int chain)
+{
+       u32 *pa_in = caldata->pa_table[chain];
+       int capdiv_offset, quick_drop_offset;
+       int capdiv2g, quick_drop;
+       int count = 0;
+       int i;
+
+       if (!AR_SREV_9485(ah) && !AR_SREV_9330(ah))
+               return false;
+
+       capdiv2g = REG_READ_FIELD(ah, AR_PHY_65NM_CH0_TXRF3,
+                                 AR_PHY_65NM_CH0_TXRF3_CAPDIV2G);
+
+       quick_drop = REG_READ_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
+                                   AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP);
+
+       if (quick_drop)
+               quick_drop -= 0x40;
+
+       for (i = 0; i < NUM_BIN + 1; i++) {
+               if (pa_in[i] == 1400)
+                       count++;
+       }
+
+       if (AR_SREV_9485(ah)) {
+               if (pa_in[23] < 800) {
+                       capdiv_offset = (int)((1000 - pa_in[23] + 75) / 150);
+                       capdiv2g += capdiv_offset;
+                       if (capdiv2g > 7) {
+                               capdiv2g = 7;
+                               if (pa_in[23] < 600) {
+                                       quick_drop++;
+                                       if (quick_drop > 0)
+                                               quick_drop = 0;
+                               }
+                       }
+               } else if (pa_in[23] == 1400) {
+                       quick_drop_offset = min_t(int, count / 3, 2);
+                       quick_drop += quick_drop_offset;
+                       capdiv2g += quick_drop_offset / 2;
+
+                       if (capdiv2g > 7)
+                               capdiv2g = 7;
+
+                       if (quick_drop > 0) {
+                               quick_drop = 0;
+                               capdiv2g -= quick_drop_offset;
+                               if (capdiv2g < 0)
+                                       capdiv2g = 0;
+                       }
+               } else {
+                       return false;
+               }
+       } else if (AR_SREV_9330(ah)) {
+               if (pa_in[23] < 1000) {
+                       capdiv_offset = (1000 - pa_in[23]) / 100;
+                       capdiv2g += capdiv_offset;
+                       if (capdiv_offset > 3) {
+                               capdiv_offset = 1;
+                               quick_drop--;
+                       }
+
+                       capdiv2g += capdiv_offset;
+                       if (capdiv2g > 6)
+                               capdiv2g = 6;
+                       if (quick_drop < -4)
+                               quick_drop = -4;
+               } else if (pa_in[23] == 1400) {
+                       if (count > 3) {
+                               quick_drop++;
+                               capdiv2g -= count / 4;
+                               if (quick_drop > -2)
+                                       quick_drop = -2;
+                       } else {
+                               capdiv2g--;
+                       }
+
+                       if (capdiv2g < 0)
+                               capdiv2g = 0;
+               } else {
+                       return false;
+               }
+       }
+
+       REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_TXRF3,
+                     AR_PHY_65NM_CH0_TXRF3_CAPDIV2G, capdiv2g);
+       REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
+                     AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP,
+                     quick_drop);
+
+       return true;
+}
+
 int ar9003_paprd_create_curve(struct ath_hw *ah,
                              struct ath9k_hw_cal_data *caldata, int chain)
 {
@@ -817,6 +917,9 @@ int ar9003_paprd_create_curve(struct ath_hw *ah,
        if (!create_pa_curve(data_L, data_U, pa_table, small_signal_gain))
                status = -2;
 
+       if (ar9003_paprd_retrain_pa_in(ah, caldata, chain))
+               status = -EINPROGRESS;
+
        REG_CLR_BIT(ah, AR_PHY_PAPRD_TRAINER_STAT1,
                    AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE);
 
index 7bfbaf065a4332c89ac5568c0cde81ed42d9313e..84d3d49568616c5452692b1660253f6ae70468cf 100644 (file)
 #define AR_PHY_AIC_CTRL_4_B0   (AR_SM_BASE + 0x4c0)
 #define AR_PHY_AIC_STAT_2_B0   (AR_SM_BASE + 0x4cc)
 
+#define AR_PHY_65NM_CH0_TXRF3       0x16048
+#define AR_PHY_65NM_CH0_TXRF3_CAPDIV2G         0x0000001e
+#define AR_PHY_65NM_CH0_TXRF3_CAPDIV2G_S       1
+
 #define AR_PHY_65NM_CH0_SYNTH4      0x1608c
 #define AR_PHY_SYNTH4_LONG_SHIFT_SELECT   (AR_SREV_9462(ah) ? 0x00000001 : 0x00000002)
 #define AR_PHY_SYNTH4_LONG_SHIFT_SELECT_S (AR_SREV_9462(ah) ? 0 : 1)
index bacdb8fb4ef453dda48d5a9394c0030cb7866596..9f83f71742a5ecb774f95c3d563f2e0dc7d37ab7 100644 (file)
@@ -341,7 +341,8 @@ void ath9k_btcoex_stop_gen_timer(struct ath_softc *sc)
 {
        struct ath_btcoex *btcoex = &sc->btcoex;
 
-       ath9k_gen_timer_stop(sc->sc_ah, btcoex->no_stomp_timer);
+       if (btcoex->hw_timer_enabled)
+               ath9k_gen_timer_stop(sc->sc_ah, btcoex->no_stomp_timer);
 }
 
 u16 ath9k_btcoex_aggr_limit(struct ath_softc *sc, u32 max_4ms_framelen)
index 60b6a9daff7e21cde68fb6eda800e4ca065c62aa..48af40151d2310e5250ad88c0ae05b47d7cae7ea 100644 (file)
@@ -463,9 +463,6 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
                ah->config.spurchans[i][1] = AR_NO_SPUR;
        }
 
-       /* PAPRD needs some more work to be enabled */
-       ah->config.paprd_disable = 1;
-
        ah->config.rx_intr_mitigation = true;
        ah->config.pcieSerDesWrite = true;
 
@@ -978,9 +975,6 @@ static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
        else
                imr_reg |= AR_IMR_TXOK;
 
-       if (opmode == NL80211_IFTYPE_AP)
-               imr_reg |= AR_IMR_MIB;
-
        ENABLE_REGWRITE_BUFFER(ah);
 
        REG_WRITE(ah, AR_IMR, imr_reg);
@@ -1778,6 +1772,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
                /* Operating channel changed, reset channel calibration data */
                memset(caldata, 0, sizeof(*caldata));
                ath9k_init_nfcal_hist_buffer(ah, chan);
+       } else if (caldata) {
+               caldata->paprd_packet_sent = false;
        }
        ah->noise = ath9k_hw_getchan_noise(ah, chan);
 
@@ -2502,7 +2498,8 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
                pCap->tx_desc_len = sizeof(struct ar9003_txc);
                pCap->txs_len = sizeof(struct ar9003_txs);
                if (!ah->config.paprd_disable &&
-                   ah->eep_ops->get_eeprom(ah, EEP_PAPRD))
+                   ah->eep_ops->get_eeprom(ah, EEP_PAPRD) &&
+                   !AR_SREV_9462(ah))
                        pCap->hw_caps |= ATH9K_HW_CAP_PAPRD;
        } else {
                pCap->tx_desc_len = sizeof(struct ath_desc);
index ce7332c64efb2c5b417cd40fec2428522186142a..6599a75f01fe3157526cf4c30f999ddaef391382 100644 (file)
@@ -405,6 +405,7 @@ struct ath9k_hw_cal_data {
        int8_t iCoff;
        int8_t qCoff;
        bool rtt_done;
+       bool paprd_packet_sent;
        bool paprd_done;
        bool nfcal_pending;
        bool nfcal_interference;
index d4549e9aac5c5f1f30ace855d6c94dafe98f4d38..825a29cc93131c4a34c22d8ac8e58e02cc41b531 100644 (file)
@@ -254,8 +254,9 @@ void ath_paprd_calibrate(struct work_struct *work)
        int chain_ok = 0;
        int chain;
        int len = 1800;
+       int ret;
 
-       if (!caldata)
+       if (!caldata || !caldata->paprd_packet_sent || caldata->paprd_done)
                return;
 
        ath9k_ps_wakeup(sc);
@@ -282,13 +283,6 @@ void ath_paprd_calibrate(struct work_struct *work)
                        continue;
 
                chain_ok = 0;
-
-               ath_dbg(common, CALIBRATE,
-                       "Sending PAPRD frame for thermal measurement on chain %d\n",
-                       chain);
-               if (!ath_paprd_send_frame(sc, skb, chain))
-                       goto fail_paprd;
-
                ar9003_paprd_setup_gain_table(ah, chain);
 
                ath_dbg(common, CALIBRATE,
@@ -302,7 +296,13 @@ void ath_paprd_calibrate(struct work_struct *work)
                        break;
                }
 
-               if (ar9003_paprd_create_curve(ah, caldata, chain)) {
+               ret = ar9003_paprd_create_curve(ah, caldata, chain);
+               if (ret == -EINPROGRESS) {
+                       ath_dbg(common, CALIBRATE,
+                               "PAPRD curve on chain %d needs to be re-trained\n",
+                               chain);
+                       break;
+               } else if (ret) {
                        ath_dbg(common, CALIBRATE,
                                "PAPRD create curve failed on chain %d\n",
                                chain);
index 2c9da6b2ecb1b7b1141770f1240188bf2af50277..0d4155aec48d72196d5c64eee5c2517766760632 100644 (file)
@@ -2018,6 +2018,9 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
 
        ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb);
 
+       if (sc->sc_ah->caldata)
+               sc->sc_ah->caldata->paprd_packet_sent = true;
+
        if (!(tx_flags & ATH_TX_ERROR))
                /* Frame was ACKed */
                tx_info->flags |= IEEE80211_TX_STAT_ACK;
index a299d42da8e74a358939b8fa5da8a32a01fd312b..58f89fa9c9f8a218ed29cfb93ad253c41471c648 100644 (file)
@@ -519,7 +519,7 @@ static void brcmf_usb_tx_complete(struct urb *urb)
        else
                devinfo->bus_pub.bus->dstats.tx_errors++;
 
-       dev_kfree_skb(req->skb);
+       brcmu_pkt_buf_free_skb(req->skb);
        req->skb = NULL;
        brcmf_usb_enq(devinfo, &devinfo->tx_freeq, req);
 
@@ -540,7 +540,7 @@ static void brcmf_usb_rx_complete(struct urb *urb)
                devinfo->bus_pub.bus->dstats.rx_packets++;
        } else {
                devinfo->bus_pub.bus->dstats.rx_errors++;
-               dev_kfree_skb(skb);
+               brcmu_pkt_buf_free_skb(skb);
                brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req);
                return;
        }
@@ -550,13 +550,15 @@ static void brcmf_usb_rx_complete(struct urb *urb)
                if (brcmf_proto_hdrpull(devinfo->dev, &ifidx, skb) != 0) {
                        brcmf_dbg(ERROR, "rx protocol error\n");
                        brcmu_pkt_buf_free_skb(skb);
+                       brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req);
                        devinfo->bus_pub.bus->dstats.rx_errors++;
                } else {
                        brcmf_rx_packet(devinfo->dev, ifidx, skb);
                        brcmf_usb_rx_refill(devinfo, req);
                }
        } else {
-               dev_kfree_skb(skb);
+               brcmu_pkt_buf_free_skb(skb);
+               brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req);
        }
        return;
 
@@ -581,14 +583,13 @@ static void brcmf_usb_rx_refill(struct brcmf_usbdev_info *devinfo,
        usb_fill_bulk_urb(req->urb, devinfo->usbdev, devinfo->rx_pipe,
                          skb->data, skb_tailroom(skb), brcmf_usb_rx_complete,
                          req);
-       req->urb->transfer_flags |= URB_ZERO_PACKET;
        req->devinfo = devinfo;
+       brcmf_usb_enq(devinfo, &devinfo->rx_postq, req);
 
        ret = usb_submit_urb(req->urb, GFP_ATOMIC);
-       if (ret == 0) {
-               brcmf_usb_enq(devinfo, &devinfo->rx_postq, req);
-       } else {
-               dev_kfree_skb(req->skb);
+       if (ret) {
+               brcmf_usb_del_fromq(devinfo, req);
+               brcmu_pkt_buf_free_skb(req->skb);
                req->skb = NULL;
                brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req);
        }
@@ -683,23 +684,22 @@ static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb)
 
        req = brcmf_usb_deq(devinfo, &devinfo->tx_freeq);
        if (!req) {
+               brcmu_pkt_buf_free_skb(skb);
                brcmf_dbg(ERROR, "no req to send\n");
                return -ENOMEM;
        }
-       if (!req->urb) {
-               brcmf_dbg(ERROR, "no urb for req %p\n", req);
-               return -ENOBUFS;
-       }
 
        req->skb = skb;
        req->devinfo = devinfo;
        usb_fill_bulk_urb(req->urb, devinfo->usbdev, devinfo->tx_pipe,
                          skb->data, skb->len, brcmf_usb_tx_complete, req);
        req->urb->transfer_flags |= URB_ZERO_PACKET;
+       brcmf_usb_enq(devinfo, &devinfo->tx_postq, req);
        ret = usb_submit_urb(req->urb, GFP_ATOMIC);
-       if (!ret) {
-               brcmf_usb_enq(devinfo, &devinfo->tx_postq, req);
-       } else {
+       if (ret) {
+               brcmf_dbg(ERROR, "brcmf_usb_tx usb_submit_urb FAILED\n");
+               brcmf_usb_del_fromq(devinfo, req);
+               brcmu_pkt_buf_free_skb(req->skb);
                req->skb = NULL;
                brcmf_usb_enq(devinfo, &devinfo->tx_freeq, req);
        }
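Both the RX-refill and TX paths above now place the request on the post queue before calling usb_submit_urb() and pull it back off (freeing the packet) if submission fails, so a completion that fires immediately can always find its request. A toy userspace sketch of that ordering, using an illustrative linked-list queue rather than the brcmfmac helpers:

#include <stdio.h>
#include <stdlib.h>

struct req {
	int id;
	struct req *next;
};

/* Toy "post queue": requests currently in flight. */
static struct req *postq;

static void enq(struct req *r)
{
	r->next = postq;
	postq = r;
}

static void del_fromq(struct req *r)
{
	struct req **p = &postq;

	while (*p && *p != r)
		p = &(*p)->next;
	if (*p)
		*p = r->next;
}

/* Stand-in for usb_submit_urb(): fails for odd ids in this demo. */
static int submit(struct req *r)
{
	return (r->id & 1) ? -1 : 0;
}

static void start_request(struct req *r)
{
	/*
	 * Track the request *before* submitting it, so a completion that
	 * fires immediately can already find it on the post queue.
	 */
	enq(r);
	if (submit(r)) {
		/* Submission failed: undo the tracking, release the request. */
		del_fromq(r);
		printf("req %d failed, dropped\n", r->id);
		free(r);
		return;
	}
	printf("req %d submitted\n", r->id);
}

int main(void)
{
	for (int i = 0; i < 4; i++) {
		struct req *r = malloc(sizeof(*r));

		r->id = i;
		start_request(r);
	}
	return 0;
}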
index 28c5fbb4af267b59ef1c8bab67f2d4ac2d7e9485..c36e9231244317945107f6cc3da3aaea034cc379 100644 (file)
@@ -1876,16 +1876,17 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
        }
 
        if (test_bit(WL_STATUS_CONNECTED, &cfg_priv->status)) {
-               scb_val.val = cpu_to_le32(0);
+               memset(&scb_val, 0, sizeof(scb_val));
                err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_RSSI, &scb_val,
                                      sizeof(struct brcmf_scb_val_le));
-               if (err)
+               if (err) {
                        WL_ERR("Could not get rssi (%d)\n", err);
-
-               rssi = le32_to_cpu(scb_val.val);
-               sinfo->filled |= STATION_INFO_SIGNAL;
-               sinfo->signal = rssi;
-               WL_CONN("RSSI %d dBm\n", rssi);
+               } else {
+                       rssi = le32_to_cpu(scb_val.val);
+                       sinfo->filled |= STATION_INFO_SIGNAL;
+                       sinfo->signal = rssi;
+                       WL_CONN("RSSI %d dBm\n", rssi);
+               }
        }
 
 done:
index 192ad5c1fcc8813805702d7b75c4ab89761cfd42..a5edebeb0b4f7f748155551df76396602885078e 100644 (file)
@@ -1233,6 +1233,9 @@ uint brcms_reset(struct brcms_info *wl)
        /* dpc will not be rescheduled */
        wl->resched = false;
 
+       /* inform publicly that interface is down */
+       wl->pub->up = false;
+
        return 0;
 }
 
index 95aa8e1683ecb4bdeb663e0cf605d699407b96f0..83324b3216527ec72195b35da104a4ce6c5ad6e1 100644 (file)
@@ -2042,7 +2042,8 @@ static void isr_indicate_associated(struct ipw2100_priv *priv, u32 status)
                return;
        }
        len = ETH_ALEN;
-       ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_AP_BSSID, &bssid, &len);
+       ret = ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_AP_BSSID, bssid,
+                                 &len);
        if (ret) {
                IPW_DEBUG_INFO("failed querying ordinals at line %d\n",
                               __LINE__);
index 46782f1102ac7c8e159c3ef764a54205326923fd..a47b306b522cd3a49fcbb1622083baacb245d621 100644 (file)
@@ -124,6 +124,9 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
        const struct fw_img *img;
        size_t bufsz;
 
+       if (!iwl_is_ready_rf(priv))
+               return -EAGAIN;
+
        /* default is to dump the entire data segment */
        if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) {
                priv->dbgfs_sram_offset = 0x800000;
index d9694c58208c38cd60575bb48f37bc75f42884bd..4ffc18dc3a5761cc6b63c5aa60691727459e5e27 100644 (file)
@@ -350,7 +350,7 @@ int iwl_queue_space(const struct iwl_queue *q);
 /*****************************************************
 * Error handling
 ******************************************************/
-int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display);
+int iwl_dump_fh(struct iwl_trans *trans, char **buf);
 void iwl_dump_csr(struct iwl_trans *trans);
 
 /*****************************************************
index 39a6ca1f009c39fdaf8c60775348f4790ede45dd..d1a61ba6247ab68a13e4cd8943959c92d0fa3452 100644 (file)
@@ -555,7 +555,7 @@ static void iwl_irq_handle_error(struct iwl_trans *trans)
        }
 
        iwl_dump_csr(trans);
-       iwl_dump_fh(trans, NULL, false);
+       iwl_dump_fh(trans, NULL);
 
        iwl_op_mode_nic_error(trans->op_mode);
 }
index 939c2f78df5833cc1d203deabe311568dc7ff88c..1e86ea2266d46971844ba5c66a2fb1142a47609d 100644 (file)
@@ -1649,13 +1649,9 @@ static const char *get_fh_string(int cmd)
 #undef IWL_CMD
 }
 
-int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display)
+int iwl_dump_fh(struct iwl_trans *trans, char **buf)
 {
        int i;
-#ifdef CONFIG_IWLWIFI_DEBUG
-       int pos = 0;
-       size_t bufsz = 0;
-#endif
        static const u32 fh_tbl[] = {
                FH_RSCSR_CHNL0_STTS_WPTR_REG,
                FH_RSCSR_CHNL0_RBDCB_BASE_REG,
@@ -1667,29 +1663,35 @@ int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display)
                FH_TSSR_TX_STATUS_REG,
                FH_TSSR_TX_ERROR_REG
        };
-#ifdef CONFIG_IWLWIFI_DEBUG
-       if (display) {
-               bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       if (buf) {
+               int pos = 0;
+               size_t bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
+
                *buf = kmalloc(bufsz, GFP_KERNEL);
                if (!*buf)
                        return -ENOMEM;
+
                pos += scnprintf(*buf + pos, bufsz - pos,
                                "FH register values:\n");
-               for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
+
+               for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
                        pos += scnprintf(*buf + pos, bufsz - pos,
                                "  %34s: 0X%08x\n",
                                get_fh_string(fh_tbl[i]),
                                iwl_read_direct32(trans, fh_tbl[i]));
-               }
+
                return pos;
        }
 #endif
+
        IWL_ERR(trans, "FH register values:\n");
-       for (i = 0; i <  ARRAY_SIZE(fh_tbl); i++) {
+       for (i = 0; i <  ARRAY_SIZE(fh_tbl); i++)
                IWL_ERR(trans, "  %34s: 0X%08x\n",
                        get_fh_string(fh_tbl[i]),
                        iwl_read_direct32(trans, fh_tbl[i]));
-       }
+
        return 0;
 }
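After this refactor iwl_dump_fh() keys off the buffer pointer alone: a non-NULL buf gets an allocated, formatted string back (the debugfs path), while a NULL buf logs the registers instead. A hedged userspace approximation of that dump-to-buffer-or-log shape, with snprintf/fprintf standing in for scnprintf/IWL_ERR and made-up register addresses:

#include <stdio.h>
#include <stdlib.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const unsigned int regs[] = { 0x1bc0, 0x1bc4, 0x1bc8 };

/* Pretend register read. */
static unsigned int read_reg(unsigned int addr)
{
	return addr ^ 0xdeadbeef;
}

/*
 * If buf is non-NULL, allocate a string, fill it and return its length;
 * the caller owns *buf.  If buf is NULL, print to stderr and return 0.
 */
static int dump_regs(char **buf)
{
	size_t i;

	if (buf) {
		size_t bufsz = ARRAY_SIZE(regs) * 48 + 40;
		int pos = 0;

		*buf = malloc(bufsz);
		if (!*buf)
			return -1;

		pos += snprintf(*buf + pos, bufsz - pos, "register values:\n");
		for (i = 0; i < ARRAY_SIZE(regs); i++)
			pos += snprintf(*buf + pos, bufsz - pos,
					"  0x%04x: 0x%08x\n",
					regs[i], read_reg(regs[i]));
		return pos;
	}

	fprintf(stderr, "register values:\n");
	for (i = 0; i < ARRAY_SIZE(regs); i++)
		fprintf(stderr, "  0x%04x: 0x%08x\n",
			regs[i], read_reg(regs[i]));
	return 0;
}

int main(void)
{
	char *buf = NULL;
	int len = dump_regs(&buf);

	if (buf) {
		fwrite(buf, 1, (size_t)len, stdout);
		free(buf);
	}
	dump_regs(NULL);   /* log path */
	return 0;
}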
 
@@ -1982,11 +1984,11 @@ static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
                                     size_t count, loff_t *ppos)
 {
        struct iwl_trans *trans = file->private_data;
-       char *buf;
+       char *buf = NULL;
        int pos = 0;
        ssize_t ret = -EFAULT;
 
-       ret = pos = iwl_dump_fh(trans, &buf, true);
+       ret = pos = iwl_dump_fh(trans, &buf);
        if (buf) {
                ret = simple_read_from_buffer(user_buf,
                                              count, ppos, buf, pos);
index e970897f6ab52370a632a64a62cc10bb3c39a3c6..4cb234349fbfacc305b1565ed5f1a30ba16aa108 100644 (file)
@@ -1326,6 +1326,11 @@ static int if_sdio_suspend(struct device *dev)
 
        mmc_pm_flag_t flags = sdio_get_host_pm_caps(func);
 
+       /* If we're powered off anyway, just let the mmc layer remove the
+        * card. */
+       if (!lbs_iface_active(card->priv))
+               return -ENOSYS;
+
        dev_info(dev, "%s: suspend: PM flags = 0x%x\n",
                 sdio_func_id(func), flags);
 
index c68adec3cc8b6522678c98582c7781b885193849..565527aee0ea3f73caa832f336c0ded06a3b22d9 100644 (file)
@@ -170,7 +170,20 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
        cmd_code = le16_to_cpu(host_cmd->command);
        cmd_size = le16_to_cpu(host_cmd->size);
 
-       skb_trim(cmd_node->cmd_skb, cmd_size);
+       /* Adjust skb length */
+       if (cmd_node->cmd_skb->len > cmd_size)
+               /*
+                * cmd_size is less than sizeof(struct host_cmd_ds_command).
+                * Trim off the unused portion.
+                */
+               skb_trim(cmd_node->cmd_skb, cmd_size);
+       else if (cmd_node->cmd_skb->len < cmd_size)
+               /*
+                * cmd_size is larger than sizeof(struct host_cmd_ds_command)
+                * because we have appended custom IE TLV. Increase skb length
+                * accordingly.
+                */
+               skb_put(cmd_node->cmd_skb, cmd_size - cmd_node->cmd_skb->len);
 
        do_gettimeofday(&tstamp);
        dev_dbg(adapter->dev, "cmd: DNLD_CMD: (%lu.%lu): %#x, act %#x, len %d,"
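The command path above now resizes the skb in both directions so its length matches exactly what the command header declares, rather than only trimming. A small sketch of the same fit-to-declared-length idea on a plain byte buffer; the names and sizes are illustrative, not the mwifiex API.

#include <stdio.h>
#include <string.h>

#define CMD_BUF_MAX 64

struct cmd_buf {
	unsigned char data[CMD_BUF_MAX];
	size_t len;                   /* current valid length */
};

/* Make buf->len equal to the length declared in the command header. */
static int fit_to_declared_size(struct cmd_buf *buf, size_t declared)
{
	if (declared > CMD_BUF_MAX)
		return -1;            /* would overflow the backing storage */

	if (buf->len > declared) {
		/* Declared size is smaller: trim off the unused tail. */
		buf->len = declared;
	} else if (buf->len < declared) {
		/* Declared size is larger (e.g. appended TLVs): extend, zero. */
		memset(buf->data + buf->len, 0, declared - buf->len);
		buf->len = declared;
	}
	return 0;
}

int main(void)
{
	struct cmd_buf buf = { .len = 32 };

	fit_to_declared_size(&buf, 20);
	printf("after trim:   %zu\n", buf.len);
	fit_to_declared_size(&buf, 48);
	printf("after extend: %zu\n", buf.len);
	return 0;
}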
index 8b9dbd76a25255634ad79338223739c939429d26..64328af496f598bb3280784b6d2adfd25ec5cc70 100644 (file)
@@ -1611,6 +1611,7 @@ static int rt2400pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
 static int rt2400pci_probe_hw(struct rt2x00_dev *rt2x00dev)
 {
        int retval;
+       u32 reg;
 
        /*
         * Allocate eeprom data.
@@ -1623,6 +1624,14 @@ static int rt2400pci_probe_hw(struct rt2x00_dev *rt2x00dev)
        if (retval)
                return retval;
 
+       /*
+        * Enable rfkill polling by setting GPIO direction of the
+        * rfkill switch GPIO pin correctly.
+        */
+       rt2x00pci_register_read(rt2x00dev, GPIOCSR, &reg);
+       rt2x00_set_field32(&reg, GPIOCSR_BIT8, 1);
+       rt2x00pci_register_write(rt2x00dev, GPIOCSR, reg);
+
        /*
         * Initialize hw specifications.
         */
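This hunk, and the matching rt2500pci/rt2500usb/rt2800/rt61/rt73 hunks further down, are all the same read-modify-write of one GPIO direction bit so rfkill polling works. A generic sketch of that read-modify-write pattern against a fake 32-bit register; set_field32() here is a local helper, not the rt2x00 macro.

#include <stdio.h>
#include <stdint.h>

/* Fake memory-mapped register for the demo. */
static uint32_t gpio_csr = 0x000000a5;

static uint32_t reg_read(void)        { return gpio_csr; }
static void     reg_write(uint32_t v) { gpio_csr = v; }

/* Replace the bits selected by mask with val, shifted into place. */
static void set_field32(uint32_t *reg, uint32_t mask, uint32_t val)
{
	int shift = __builtin_ctz(mask);   /* position of lowest mask bit */

	*reg = (*reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t reg = reg_read();              /* read   */

	set_field32(&reg, 0x00000100, 1);       /* modify: direction bit */
	reg_write(reg);                         /* write  */

	printf("GPIO CSR now 0x%08x\n", gpio_csr);
	return 0;
}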
index d3a4a68cc439b22faea7f6e9d4899e9313e5a7e2..7564ae992b735179b15e24a3d616c5a71acb1aeb 100644 (file)
 #define GPIOCSR_BIT5                   FIELD32(0x00000020)
 #define GPIOCSR_BIT6                   FIELD32(0x00000040)
 #define GPIOCSR_BIT7                   FIELD32(0x00000080)
+#define GPIOCSR_BIT8                   FIELD32(0x00000100)
 
 /*
  * BBPPCSR: BBP Pin control register.
index d2cf8a4bc8b52fd985f4720df6bc20a9269069c3..3de0406735f6b7347b46cdf2305e413aaa17256d 100644 (file)
@@ -1929,6 +1929,7 @@ static int rt2500pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
 static int rt2500pci_probe_hw(struct rt2x00_dev *rt2x00dev)
 {
        int retval;
+       u32 reg;
 
        /*
         * Allocate eeprom data.
@@ -1941,6 +1942,14 @@ static int rt2500pci_probe_hw(struct rt2x00_dev *rt2x00dev)
        if (retval)
                return retval;
 
+       /*
+        * Enable rfkill polling by setting GPIO direction of the
+        * rfkill switch GPIO pin correctly.
+        */
+       rt2x00pci_register_read(rt2x00dev, GPIOCSR, &reg);
+       rt2x00_set_field32(&reg, GPIOCSR_DIR0, 1);
+       rt2x00pci_register_write(rt2x00dev, GPIOCSR, reg);
+
        /*
         * Initialize hw specifications.
         */
index 3aae36bb0a9e9f99cf89705bc2dca5022109824a..89fee311d8fda5ad07ae5ecd50fae567232aa35d 100644 (file)
@@ -283,7 +283,7 @@ static int rt2500usb_rfkill_poll(struct rt2x00_dev *rt2x00dev)
        u16 reg;
 
        rt2500usb_register_read(rt2x00dev, MAC_CSR19, &reg);
-       return rt2x00_get_field32(reg, MAC_CSR19_BIT7);
+       return rt2x00_get_field16(reg, MAC_CSR19_BIT7);
 }
 
 #ifdef CONFIG_RT2X00_LIB_LEDS
@@ -1768,6 +1768,7 @@ static int rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
 static int rt2500usb_probe_hw(struct rt2x00_dev *rt2x00dev)
 {
        int retval;
+       u16 reg;
 
        /*
         * Allocate eeprom data.
@@ -1780,6 +1781,14 @@ static int rt2500usb_probe_hw(struct rt2x00_dev *rt2x00dev)
        if (retval)
                return retval;
 
+       /*
+        * Enable rfkill polling by setting GPIO direction of the
+        * rfkill switch GPIO pin correctly.
+        */
+       rt2500usb_register_read(rt2x00dev, MAC_CSR19, &reg);
+       rt2x00_set_field16(&reg, MAC_CSR19_BIT8, 0);
+       rt2500usb_register_write(rt2x00dev, MAC_CSR19, reg);
+
        /*
         * Initialize hw specifications.
         */
index b493306a7eede0888af2cccef613c6612b579514..196bd5103e4f5450483ce1e60449021bf6eafd2c 100644 (file)
  * MAC_CSR19: GPIO control register.
  */
 #define MAC_CSR19                      0x0426
-#define MAC_CSR19_BIT0                 FIELD32(0x0001)
-#define MAC_CSR19_BIT1                 FIELD32(0x0002)
-#define MAC_CSR19_BIT2                 FIELD32(0x0004)
-#define MAC_CSR19_BIT3                 FIELD32(0x0008)
-#define MAC_CSR19_BIT4                 FIELD32(0x0010)
-#define MAC_CSR19_BIT5                 FIELD32(0x0020)
-#define MAC_CSR19_BIT6                 FIELD32(0x0040)
-#define MAC_CSR19_BIT7                 FIELD32(0x0080)
+#define MAC_CSR19_BIT0                 FIELD16(0x0001)
+#define MAC_CSR19_BIT1                 FIELD16(0x0002)
+#define MAC_CSR19_BIT2                 FIELD16(0x0004)
+#define MAC_CSR19_BIT3                 FIELD16(0x0008)
+#define MAC_CSR19_BIT4                 FIELD16(0x0010)
+#define MAC_CSR19_BIT5                 FIELD16(0x0020)
+#define MAC_CSR19_BIT6                 FIELD16(0x0040)
+#define MAC_CSR19_BIT7                 FIELD16(0x0080)
+#define MAC_CSR19_BIT8                 FIELD16(0x0100)
 
 /*
  * MAC_CSR20: LED control register.
index cb8c2aca54e4dfdac4a7e223a8070f4e4543399e..b93516d832fb5603e4bb3d287a4770c0c8de06ad 100644 (file)
@@ -4089,6 +4089,7 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
                rt2800_register_write(rt2x00dev, LDO_CFG0, reg);
                msleep(1);
                rt2800_register_read(rt2x00dev, LDO_CFG0, &reg);
+               rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 0);
                rt2x00_set_field32(&reg, LDO_CFG0_BGSEL, 1);
                rt2800_register_write(rt2x00dev, LDO_CFG0, reg);
        }
index 98aa426a35649828e3c70c0f2bab0acf24e49381..4765bbd654cdcfeea617c84f9c755db05409600d 100644 (file)
@@ -983,6 +983,7 @@ static int rt2800pci_validate_eeprom(struct rt2x00_dev *rt2x00dev)
 static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
 {
        int retval;
+       u32 reg;
 
        /*
         * Allocate eeprom data.
@@ -995,6 +996,14 @@ static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
        if (retval)
                return retval;
 
+       /*
+        * Enable rfkill polling by setting GPIO direction of the
+        * rfkill switch GPIO pin correctly.
+        */
+       rt2x00pci_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
+       rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT2, 1);
+       rt2x00pci_register_write(rt2x00dev, GPIO_CTRL_CFG, reg);
+
        /*
         * Initialize hw specifications.
         */
index 6cf336595e2544a5703612e5156af0559b5cc8ab..6b4226b716187ea037d2a1c84e012806649e8816 100644 (file)
@@ -667,8 +667,16 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
        skb_pull(entry->skb, RXINFO_DESC_SIZE);
 
        /*
-        * FIXME: we need to check for rx_pkt_len validity
+        * Check for rx_pkt_len validity. Return if invalid, leaving
+        * rxdesc->size zeroed out by the upper level.
         */
+       if (unlikely(rx_pkt_len == 0 ||
+                       rx_pkt_len > entry->queue->data_size)) {
+               ERROR(entry->queue->rt2x00dev,
+                       "Bad frame size %d, forcing to 0\n", rx_pkt_len);
+               return;
+       }
+
        rxd = (__le32 *)(entry->skb->data + rx_pkt_len);
 
        /*
@@ -736,6 +744,7 @@ static int rt2800usb_validate_eeprom(struct rt2x00_dev *rt2x00dev)
 static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev)
 {
        int retval;
+       u32 reg;
 
        /*
         * Allocate eeprom data.
@@ -748,6 +757,14 @@ static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev)
        if (retval)
                return retval;
 
+       /*
+        * Enable rfkill polling by setting GPIO direction of the
+        * rfkill switch GPIO pin correctly.
+        */
+       rt2x00usb_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
+       rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT2, 1);
+       rt2x00usb_register_write(rt2x00dev, GPIO_CTRL_CFG, reg);
+
        /*
         * Initialize hw specifications.
         */
@@ -1157,6 +1174,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
        { USB_DEVICE(0x1690, 0x0744) },
        { USB_DEVICE(0x1690, 0x0761) },
        { USB_DEVICE(0x1690, 0x0764) },
+       /* ASUS */
+       { USB_DEVICE(0x0b05, 0x179d) },
        /* Cisco */
        { USB_DEVICE(0x167b, 0x4001) },
        /* EnGenius */
@@ -1222,7 +1241,6 @@ static struct usb_device_id rt2800usb_device_table[] = {
        { USB_DEVICE(0x0b05, 0x1760) },
        { USB_DEVICE(0x0b05, 0x1761) },
        { USB_DEVICE(0x0b05, 0x1790) },
-       { USB_DEVICE(0x0b05, 0x179d) },
        /* AzureWave */
        { USB_DEVICE(0x13d3, 0x3262) },
        { USB_DEVICE(0x13d3, 0x3284) },
index a6b88bd4a1a57d7f904c75faa62c95ea219be029..3f07e36f462b384565884580170faf7c3be2f25f 100644 (file)
@@ -629,7 +629,7 @@ void rt2x00lib_rxdone(struct queue_entry *entry, gfp_t gfp)
         */
        if (unlikely(rxdesc.size == 0 ||
                     rxdesc.size > entry->queue->data_size)) {
-               WARNING(rt2x00dev, "Wrong frame size %d max %d.\n",
+               ERROR(rt2x00dev, "Wrong frame size %d max %d.\n",
                        rxdesc.size, entry->queue->data_size);
                dev_kfree_skb(entry->skb);
                goto renew_skb;
index 3f7bc5cadf9a8a7a433688e8d480d76de3c1ab82..b8ec96163922a11711a3d6800b9556052c1386fc 100644 (file)
@@ -2832,6 +2832,7 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
 static int rt61pci_probe_hw(struct rt2x00_dev *rt2x00dev)
 {
        int retval;
+       u32 reg;
 
        /*
         * Disable power saving.
@@ -2849,6 +2850,14 @@ static int rt61pci_probe_hw(struct rt2x00_dev *rt2x00dev)
        if (retval)
                return retval;
 
+       /*
+        * Enable rfkill polling by setting GPIO direction of the
+        * rfkill switch GPIO pin correctly.
+        */
+       rt2x00pci_register_read(rt2x00dev, MAC_CSR13, &reg);
+       rt2x00_set_field32(&reg, MAC_CSR13_BIT13, 1);
+       rt2x00pci_register_write(rt2x00dev, MAC_CSR13, reg);
+
        /*
         * Initialize hw specifications.
         */
index e3cd6db76b0e561d481873d22c5637d3d1732446..8f3da5a56766f4c3293825c2d649078f4cb5455f 100644 (file)
@@ -372,6 +372,7 @@ struct hw_pairwise_ta_entry {
 #define MAC_CSR13_BIT10                        FIELD32(0x00000400)
 #define MAC_CSR13_BIT11                        FIELD32(0x00000800)
 #define MAC_CSR13_BIT12                        FIELD32(0x00001000)
+#define MAC_CSR13_BIT13                        FIELD32(0x00002000)
 
 /*
  * MAC_CSR14: LED control register.
index ba6e434b859d66506c26c5f05b7d5a569403c9d9..248436c13ce04ae1f79312c6cbb1e16d8a4b5fc9 100644 (file)
@@ -2177,6 +2177,7 @@ static int rt73usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
 static int rt73usb_probe_hw(struct rt2x00_dev *rt2x00dev)
 {
        int retval;
+       u32 reg;
 
        /*
         * Allocate eeprom data.
@@ -2189,6 +2190,14 @@ static int rt73usb_probe_hw(struct rt2x00_dev *rt2x00dev)
        if (retval)
                return retval;
 
+       /*
+        * Enable rfkill polling by setting GPIO direction of the
+        * rfkill switch GPIO pin correctly.
+        */
+       rt2x00usb_register_read(rt2x00dev, MAC_CSR13, &reg);
+       rt2x00_set_field32(&reg, MAC_CSR13_BIT15, 0);
+       rt2x00usb_register_write(rt2x00dev, MAC_CSR13, reg);
+
        /*
         * Initialize hw specifications.
         */
index 9f6b470414d33a687c8795b380add18cd8efaeb4..df1cc116b83be891ee2ff20702260f5949d3d983 100644 (file)
@@ -282,6 +282,9 @@ struct hw_pairwise_ta_entry {
 #define MAC_CSR13_BIT10                        FIELD32(0x00000400)
 #define MAC_CSR13_BIT11                        FIELD32(0x00000800)
 #define MAC_CSR13_BIT12                        FIELD32(0x00001000)
+#define MAC_CSR13_BIT13                        FIELD32(0x00002000)
+#define MAC_CSR13_BIT14                        FIELD32(0x00004000)
+#define MAC_CSR13_BIT15                        FIELD32(0x00008000)
 
 /*
  * MAC_CSR14: LED control register.
index 30899901aef56b00e05c5f062c86cabcb54127d8..650f79a1f2bd4a89cd96326d63c85982b24db0aa 100644 (file)
@@ -57,8 +57,7 @@
 static const struct ethtool_ops xennet_ethtool_ops;
 
 struct netfront_cb {
-       struct page *page;
-       unsigned offset;
+       int pull_to;
 };
 
 #define NETFRONT_SKB_CB(skb)   ((struct netfront_cb *)((skb)->cb))
@@ -867,15 +866,9 @@ static int handle_incoming_queue(struct net_device *dev,
        struct sk_buff *skb;
 
        while ((skb = __skb_dequeue(rxq)) != NULL) {
-               struct page *page = NETFRONT_SKB_CB(skb)->page;
-               void *vaddr = page_address(page);
-               unsigned offset = NETFRONT_SKB_CB(skb)->offset;
-
-               memcpy(skb->data, vaddr + offset,
-                      skb_headlen(skb));
+               int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
 
-               if (page != skb_frag_page(&skb_shinfo(skb)->frags[0]))
-                       __free_page(page);
+               __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
 
                /* Ethernet work: Delayed to here as it peeks the header. */
                skb->protocol = eth_type_trans(skb, dev);
@@ -913,7 +906,6 @@ static int xennet_poll(struct napi_struct *napi, int budget)
        struct sk_buff_head errq;
        struct sk_buff_head tmpq;
        unsigned long flags;
-       unsigned int len;
        int err;
 
        spin_lock(&np->rx_lock);
@@ -955,24 +947,13 @@ err:
                        }
                }
 
-               NETFRONT_SKB_CB(skb)->page =
-                       skb_frag_page(&skb_shinfo(skb)->frags[0]);
-               NETFRONT_SKB_CB(skb)->offset = rx->offset;
-
-               len = rx->status;
-               if (len > RX_COPY_THRESHOLD)
-                       len = RX_COPY_THRESHOLD;
-               skb_put(skb, len);
+               NETFRONT_SKB_CB(skb)->pull_to = rx->status;
+               if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
+                       NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;
 
-               if (rx->status > len) {
-                       skb_shinfo(skb)->frags[0].page_offset =
-                               rx->offset + len;
-                       skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status - len);
-                       skb->data_len = rx->status - len;
-               } else {
-                       __skb_fill_page_desc(skb, 0, NULL, 0, 0);
-                       skb_shinfo(skb)->nr_frags = 0;
-               }
+               skb_shinfo(skb)->frags[0].page_offset = rx->offset;
+               skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
+               skb->data_len = rx->status;
 
                i = xennet_fill_frags(np, skb, &tmpq);
 
@@ -999,7 +980,7 @@ err:
                 * receive throughout using the standard receive
                 * buffer size was cut by 25%(!!!).
                 */
-               skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len);
+               skb->truesize += skb->data_len - RX_COPY_THRESHOLD;
                skb->len += skb->data_len;
 
                if (rx->flags & XEN_NETRXF_csum_blank)
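Instead of copying the head by hand, the receive path now just records how much of the frame should end up in the linear area, capped at RX_COPY_THRESHOLD, and defers the pull to __pskb_pull_tail(). The clamp itself is a one-liner; a trivial sketch with plain integers (the threshold value below is made up for the demo):

#include <stdio.h>

#define RX_COPY_THRESHOLD 256   /* demo value, not the driver's constant */

/* How many bytes should end up in the linear part of the buffer. */
static int pull_to_for(int frame_len)
{
	return frame_len > RX_COPY_THRESHOLD ? RX_COPY_THRESHOLD : frame_len;
}

int main(void)
{
	int sizes[] = { 60, 256, 1500 };

	for (int i = 0; i < 3; i++)
		printf("frame %4d -> pull %d bytes\n",
		       sizes[i], pull_to_for(sizes[i]));
	return 0;
}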
index 5270f1a99328d678396739781c7b0d4a87a586ef..0862b727d7c3689d2e796ac6cf4beb3aeb5add45 100644 (file)
@@ -280,8 +280,12 @@ static long local_pci_probe(void *_ddi)
 {
        struct drv_dev_and_id *ddi = _ddi;
        struct device *dev = &ddi->dev->dev;
+       struct device *parent = dev->parent;
        int rc;
 
+       /* The parent bridge must be in active state when probing */
+       if (parent)
+               pm_runtime_get_sync(parent);
        /* Unbound PCI devices are always set to disabled and suspended.
         * During probe, the device is set to enabled and active and the
         * usage count is incremented.  If the driver supports runtime PM,
@@ -298,6 +302,8 @@ static long local_pci_probe(void *_ddi)
                pm_runtime_set_suspended(dev);
                pm_runtime_put_noidle(dev);
        }
+       if (parent)
+               pm_runtime_put(parent);
        return rc;
 }
 
@@ -623,21 +629,6 @@ static int pci_pm_prepare(struct device *dev)
        struct device_driver *drv = dev->driver;
        int error = 0;
 
-       /*
-        * If a PCI device configured to wake up the system from sleep states
-        * has been suspended at run time and there's a resume request pending
-        * for it, this is equivalent to the device signaling wakeup, so the
-        * system suspend operation should be aborted.
-        */
-       pm_runtime_get_noresume(dev);
-       if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
-               pm_wakeup_event(dev, 0);
-
-       if (pm_wakeup_pending()) {
-               pm_runtime_put_sync(dev);
-               return -EBUSY;
-       }
-
        /*
         * PCI devices suspended at run time need to be resumed at this
         * point, because in general it is necessary to reconfigure them for
@@ -661,8 +652,6 @@ static void pci_pm_complete(struct device *dev)
 
        if (drv && drv->pm && drv->pm->complete)
                drv->pm->complete(dev);
-
-       pm_runtime_put_sync(dev);
 }
 
 #else /* !CONFIG_PM_SLEEP */
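local_pci_probe() now takes a runtime-PM reference on the parent bridge for the duration of the probe and drops it afterwards regardless of the outcome. The essential shape is a balanced get/put around a critical section; a toy refcount sketch follows, whose names mimic but are not the runtime-PM API.

#include <stdio.h>

struct dev {
	const char *name;
	struct dev *parent;
	int usage;                 /* toy usage counter */
};

static void get_sync(struct dev *d) { d->usage++; }
static void put(struct dev *d)      { d->usage--; }

static int do_probe(struct dev *d)
{
	printf("probing %s (parent %s, usage=%d)\n",
	       d->name, d->parent ? d->parent->name : "none",
	       d->parent ? d->parent->usage : 0);
	return 0;                  /* pretend the driver bound successfully */
}

static int local_probe(struct dev *d)
{
	struct dev *parent = d->parent;
	int rc;

	/* Keep the parent "awake" while the child is being probed. */
	if (parent)
		get_sync(parent);

	rc = do_probe(d);

	if (parent)
		put(parent);       /* always balanced, success or failure */
	return rc;
}

int main(void)
{
	struct dev bridge = { .name = "bridge" };
	struct dev nic    = { .name = "nic", .parent = &bridge };

	local_probe(&nic);
	printf("bridge usage after probe: %d\n", bridge.usage);
	return 0;
}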
index 6869009c7393f7081497c51901f11f31e62957bf..02d107b152818e948cc3e561d276bf24126ac997 100644 (file)
@@ -458,6 +458,40 @@ boot_vga_show(struct device *dev, struct device_attribute *attr, char *buf)
 }
 struct device_attribute vga_attr = __ATTR_RO(boot_vga);
 
+static void
+pci_config_pm_runtime_get(struct pci_dev *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct device *parent = dev->parent;
+
+       if (parent)
+               pm_runtime_get_sync(parent);
+       pm_runtime_get_noresume(dev);
+       /*
+        * pdev->current_state is set to PCI_D3cold during suspending,
+        * so wait until suspending completes
+        */
+       pm_runtime_barrier(dev);
+       /*
+        * Only need to resume devices in D3cold, because config
+        * registers are still accessible for devices suspended but
+        * not in D3cold.
+        */
+       if (pdev->current_state == PCI_D3cold)
+               pm_runtime_resume(dev);
+}
+
+static void
+pci_config_pm_runtime_put(struct pci_dev *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct device *parent = dev->parent;
+
+       pm_runtime_put(dev);
+       if (parent)
+               pm_runtime_put_sync(parent);
+}
+
 static ssize_t
 pci_read_config(struct file *filp, struct kobject *kobj,
                struct bin_attribute *bin_attr,
@@ -484,6 +518,8 @@ pci_read_config(struct file *filp, struct kobject *kobj,
                size = count;
        }
 
+       pci_config_pm_runtime_get(dev);
+
        if ((off & 1) && size) {
                u8 val;
                pci_user_read_config_byte(dev, off, &val);
@@ -529,6 +565,8 @@ pci_read_config(struct file *filp, struct kobject *kobj,
                --size;
        }
 
+       pci_config_pm_runtime_put(dev);
+
        return count;
 }
 
@@ -549,6 +587,8 @@ pci_write_config(struct file* filp, struct kobject *kobj,
                count = size;
        }
        
+       pci_config_pm_runtime_get(dev);
+
        if ((off & 1) && size) {
                pci_user_write_config_byte(dev, off, data[off - init_off]);
                off++;
@@ -587,6 +627,8 @@ pci_write_config(struct file* filp, struct kobject *kobj,
                --size;
        }
 
+       pci_config_pm_runtime_put(dev);
+
        return count;
 }
 
index f3ea977a5b1bf6f458c8ed0757cd8e02cc1d3ff2..ab4bf5a4c2f12ee6dc90a83666d03387fc618fca 100644 (file)
@@ -1941,6 +1941,7 @@ void pci_pm_init(struct pci_dev *dev)
        dev->pm_cap = pm;
        dev->d3_delay = PCI_PM_D3_WAIT;
        dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
+       dev->d3cold_allowed = true;
 
        dev->d1_support = false;
        dev->d2_support = false;
index 3a7eefcb270a5dda9a2dcbcead63086da4433c3e..e76b44777dbf7f2893ecdcde08997cad1d904eb4 100644 (file)
@@ -140,9 +140,17 @@ static int pcie_port_runtime_resume(struct device *dev)
 {
        return 0;
 }
+
+static int pcie_port_runtime_idle(struct device *dev)
+{
+       /* Delay for a short while to prevent too frequent suspend/resume */
+       pm_schedule_suspend(dev, 10);
+       return -EBUSY;
+}
 #else
 #define pcie_port_runtime_suspend      NULL
 #define pcie_port_runtime_resume       NULL
+#define pcie_port_runtime_idle         NULL
 #endif
 
 static const struct dev_pm_ops pcie_portdrv_pm_ops = {
@@ -155,6 +163,7 @@ static const struct dev_pm_ops pcie_portdrv_pm_ops = {
        .resume_noirq   = pcie_port_resume_noirq,
        .runtime_suspend = pcie_port_runtime_suspend,
        .runtime_resume = pcie_port_runtime_resume,
+       .runtime_idle   = pcie_port_runtime_idle,
 };
 
 #define PCIE_PORTDRV_PM_OPS    (&pcie_portdrv_pm_ops)
@@ -200,6 +209,11 @@ static int __devinit pcie_portdrv_probe(struct pci_dev *dev,
                return status;
 
        pci_save_state(dev);
+       /*
+        * D3cold may not work properly on some PCIe ports, so disable
+        * it by default.
+        */
+       dev->d3cold_allowed = false;
        if (!pci_match_id(port_runtime_pm_black_list, dev))
                pm_runtime_put_noidle(&dev->dev);
 
index 6c143b4497ca4381677dcc34459ecf269e5cdc88..9f8a6b79a8ecf22d604056a902e7ca7adee8520a 100644 (file)
@@ -144,15 +144,13 @@ static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
        case PCI_BASE_ADDRESS_MEM_TYPE_32:
                break;
        case PCI_BASE_ADDRESS_MEM_TYPE_1M:
-               dev_info(&dev->dev, "1M mem BAR treated as 32-bit BAR\n");
+               /* 1M mem BAR treated as 32-bit BAR */
                break;
        case PCI_BASE_ADDRESS_MEM_TYPE_64:
                flags |= IORESOURCE_MEM_64;
                break;
        default:
-               dev_warn(&dev->dev,
-                        "mem unknown type %x treated as 32-bit BAR\n",
-                        mem_type);
+               /* mem unknown type treated as 32-bit BAR */
                break;
        }
        return flags;
@@ -173,9 +171,11 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
        u32 l, sz, mask;
        u16 orig_cmd;
        struct pci_bus_region region;
+       bool bar_too_big = false, bar_disabled = false;
 
        mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
 
+       /* No printks while decoding is disabled! */
        if (!dev->mmio_always_on) {
                pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
                pci_write_config_word(dev, PCI_COMMAND,
@@ -240,8 +240,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
                        goto fail;
 
                if ((sizeof(resource_size_t) < 8) && (sz64 > 0x100000000ULL)) {
-                       dev_err(&dev->dev, "reg %x: can't handle 64-bit BAR\n",
-                               pos);
+                       bar_too_big = true;
                        goto fail;
                }
 
@@ -252,12 +251,11 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
                        region.start = 0;
                        region.end = sz64;
                        pcibios_bus_to_resource(dev, res, &region);
+                       bar_disabled = true;
                } else {
                        region.start = l64;
                        region.end = l64 + sz64;
                        pcibios_bus_to_resource(dev, res, &region);
-                       dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n",
-                                  pos, res);
                }
        } else {
                sz = pci_size(l, sz, mask);
@@ -268,18 +266,23 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
                region.start = l;
                region.end = l + sz;
                pcibios_bus_to_resource(dev, res, &region);
-
-               dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", pos, res);
        }
 
- out:
+       goto out;
+
+
+fail:
+       res->flags = 0;
+out:
        if (!dev->mmio_always_on)
                pci_write_config_word(dev, PCI_COMMAND, orig_cmd);
 
+       if (bar_too_big)
+               dev_err(&dev->dev, "reg %x: can't handle 64-bit BAR\n", pos);
+       if (res->flags && !bar_disabled)
+               dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", pos, res);
+
        return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
- fail:
-       res->flags = 0;
-       goto out;
 }
 
 static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
index 3782e1cd3697020219d34b81aa039a8c767be486..934d861a32359bb553ce17966d152f089b31c04d 100644 (file)
@@ -2196,10 +2196,8 @@ static int __init acer_wmi_init(void)
                interface->capability &= ~ACER_CAP_BRIGHTNESS;
                pr_info("Brightness must be controlled by acpi video driver\n");
        } else {
-#ifdef CONFIG_ACPI_VIDEO
                pr_info("Disabling ACPI video driver\n");
                acpi_video_unregister();
-#endif
        }
 
        if (wmi_has_guid(WMID_GUID3)) {
index dfb1a92ce9497cb49e913fa23d8f8819300243e9..db8f63841b4265922b5a630c020e00cb4f490ad3 100644 (file)
@@ -101,7 +101,7 @@ static void gmux_pio_write32(struct apple_gmux_data *gmux_data, int port,
 
        for (i = 0; i < 4; i++) {
                tmpval = (val >> (i * 8)) & 0xff;
-               outb(tmpval, port + i);
+               outb(tmpval, gmux_data->iostart + port + i);
        }
 }
 
@@ -142,8 +142,9 @@ static u8 gmux_index_read8(struct apple_gmux_data *gmux_data, int port)
        u8 val;
 
        mutex_lock(&gmux_data->index_lock);
-       outb((port & 0xff), gmux_data->iostart + GMUX_PORT_READ);
        gmux_index_wait_ready(gmux_data);
+       outb((port & 0xff), gmux_data->iostart + GMUX_PORT_READ);
+       gmux_index_wait_complete(gmux_data);
        val = inb(gmux_data->iostart + GMUX_PORT_VALUE);
        mutex_unlock(&gmux_data->index_lock);
 
@@ -166,8 +167,9 @@ static u32 gmux_index_read32(struct apple_gmux_data *gmux_data, int port)
        u32 val;
 
        mutex_lock(&gmux_data->index_lock);
-       outb((port & 0xff), gmux_data->iostart + GMUX_PORT_READ);
        gmux_index_wait_ready(gmux_data);
+       outb((port & 0xff), gmux_data->iostart + GMUX_PORT_READ);
+       gmux_index_wait_complete(gmux_data);
        val = inl(gmux_data->iostart + GMUX_PORT_VALUE);
        mutex_unlock(&gmux_data->index_lock);
 
@@ -461,18 +463,22 @@ static int __devinit gmux_probe(struct pnp_dev *pnp,
        ver_release = gmux_read8(gmux_data, GMUX_PORT_VERSION_RELEASE);
        if (ver_major == 0xff && ver_minor == 0xff && ver_release == 0xff) {
                if (gmux_is_indexed(gmux_data)) {
+                       u32 version;
                        mutex_init(&gmux_data->index_lock);
                        gmux_data->indexed = true;
+                       version = gmux_read32(gmux_data,
+                               GMUX_PORT_VERSION_MAJOR);
+                       ver_major = (version >> 24) & 0xff;
+                       ver_minor = (version >> 16) & 0xff;
+                       ver_release = (version >> 8) & 0xff;
                } else {
                        pr_info("gmux device not present\n");
                        ret = -ENODEV;
                        goto err_release;
                }
-               pr_info("Found indexed gmux\n");
-       } else {
-               pr_info("Found gmux version %d.%d.%d\n", ver_major, ver_minor,
-                       ver_release);
        }
+       pr_info("Found gmux version %d.%d.%d [%s]\n", ver_major, ver_minor,
+               ver_release, (gmux_data->indexed ? "indexed" : "classic"));
 
        memset(&props, 0, sizeof(props));
        props.type = BACKLIGHT_PLATFORM;
@@ -505,9 +511,7 @@ static int __devinit gmux_probe(struct pnp_dev *pnp,
         * Disable the other backlight choices.
         */
        acpi_video_dmi_promote_vendor();
-#if defined (CONFIG_ACPI_VIDEO) || defined (CONFIG_ACPI_VIDEO_MODULE)
        acpi_video_unregister();
-#endif
        apple_bl_unregister();
 
        gmux_data->power_state = VGA_SWITCHEROO_ON;
@@ -593,9 +597,7 @@ static void __devexit gmux_remove(struct pnp_dev *pnp)
        kfree(gmux_data);
 
        acpi_video_dmi_demote_vendor();
-#if defined (CONFIG_ACPI_VIDEO) || defined (CONFIG_ACPI_VIDEO_MODULE)
        acpi_video_register();
-#endif
        apple_bl_register();
 }
 
index e38f91be0b10964c12f6a9b8eb4347908fc54d94..4b568df56643f846de78a3228582342fb3a89c67 100644 (file)
@@ -85,7 +85,7 @@ static char *wled_type = "unknown";
 static char *bled_type = "unknown";
 
 module_param(wled_type, charp, 0444);
-MODULE_PARM_DESC(wlan_status, "Set the wled type on boot "
+MODULE_PARM_DESC(wled_type, "Set the wled type on boot "
                 "(unknown, led or rfkill). "
                 "default is unknown");
 
@@ -863,9 +863,9 @@ static ssize_t show_infos(struct device *dev,
         * The significance of others is yet to be found.
         * If we don't find the method, we assume the device are present.
         */
-       rv = acpi_evaluate_integer(asus->handle, "HRWS", NULL, &temp);
+       rv = acpi_evaluate_integer(asus->handle, "HWRS", NULL, &temp);
        if (!ACPI_FAILURE(rv))
-               len += sprintf(page + len, "HRWS value         : %#x\n",
+               len += sprintf(page + len, "HWRS value         : %#x\n",
                               (uint) temp);
        /*
         * Another value for userspace: the ASYM method returns 0x02 for
@@ -1751,9 +1751,9 @@ static int asus_laptop_get_info(struct asus_laptop *asus)
         * The significance of others is yet to be found.
         */
        status =
-           acpi_evaluate_integer(asus->handle, "HRWS", NULL, &hwrs_result);
+           acpi_evaluate_integer(asus->handle, "HWRS", NULL, &hwrs_result);
        if (!ACPI_FAILURE(status))
-               pr_notice("  HRWS returned %x", (int)hwrs_result);
+               pr_notice("  HWRS returned %x", (int)hwrs_result);
 
        if (!acpi_check_handle(asus->handle, METHOD_WL_STATUS, NULL))
                asus->have_rsts = true;
index 2eb9fe8e8efd038c7bb1d25ad4fbde2961ca8e1a..c0e9ff489b2417f2469e7abc7d2289e4b971c8a2 100644 (file)
@@ -47,9 +47,7 @@
 #include <linux/thermal.h>
 #include <acpi/acpi_bus.h>
 #include <acpi/acpi_drivers.h>
-#ifdef CONFIG_ACPI_VIDEO
 #include <acpi/video.h>
-#endif
 
 #include "asus-wmi.h"
 
@@ -1704,10 +1702,8 @@ static int asus_wmi_add(struct platform_device *pdev)
        if (asus->driver->quirks->wmi_backlight_power)
                acpi_video_dmi_promote_vendor();
        if (!acpi_video_backlight_support()) {
-#ifdef CONFIG_ACPI_VIDEO
                pr_info("Disabling ACPI video driver\n");
                acpi_video_unregister();
-#endif
                err = asus_wmi_backlight_init(asus);
                if (err && err != -ENODEV)
                        goto fail_backlight;
index dab91b48d22cf5fbb955efd7ecb5fc0f6fa318c1..5ca264179f4e32a758102b0a05f865164250534b 100644 (file)
@@ -610,12 +610,12 @@ static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc, acpi_handle handle)
 
                if (!bus) {
                        pr_warn("Unable to find PCI bus 1?\n");
-                       goto out_unlock;
+                       goto out_put_dev;
                }
 
                if (pci_bus_read_config_dword(bus, 0, PCI_VENDOR_ID, &l)) {
                        pr_err("Unable to read PCI config space?\n");
-                       goto out_unlock;
+                       goto out_put_dev;
                }
 
                absent = (l == 0xffffffff);
@@ -627,7 +627,7 @@ static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc, acpi_handle handle)
                                absent ? "absent" : "present");
                        pr_warn("skipped wireless hotplug as probably "
                                "inappropriate for this model\n");
-                       goto out_unlock;
+                       goto out_put_dev;
                }
 
                if (!blocked) {
@@ -635,7 +635,7 @@ static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc, acpi_handle handle)
                        if (dev) {
                                /* Device already present */
                                pci_dev_put(dev);
-                               goto out_unlock;
+                               goto out_put_dev;
                        }
                        dev = pci_scan_single_device(bus, 0);
                        if (dev) {
@@ -650,6 +650,8 @@ static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc, acpi_handle handle)
                                pci_dev_put(dev);
                        }
                }
+out_put_dev:
+               pci_dev_put(port);
        }
 
 out_unlock:
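The hotplug fix above routes every early exit through a new out_put_dev label so the reference taken on the root port is dropped on all paths, not just the success path. A compact sketch of that goto-unwind style; acquire()/release() are stand-ins for pci_dev_get()/pci_dev_put().

#include <stdio.h>
#include <stdlib.h>

static int refs;

static int *acquire(void)
{
	int *p = malloc(sizeof(*p));

	if (p)
		refs++;
	return p;
}

static void release(int *p)
{
	refs--;
	free(p);
}

static int do_hotplug(int fail_early)
{
	int ret = 0;
	int *port = acquire();        /* reference we must drop on all paths */

	if (!port)
		return -1;

	if (fail_early) {
		ret = -1;
		goto out_put;         /* early exit still releases the port */
	}

	printf("hotplug handled\n");

out_put:
	release(port);
	return ret;
}

int main(void)
{
	do_hotplug(1);
	do_hotplug(0);
	printf("outstanding references: %d\n", refs);   /* expect 0 */
	return 0;
}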
index c1ca7bcebb66b52bfb033fc24cc0bc2856bd313d..dd90d15f52101e24296b30523d68056e01bac7df 100644 (file)
@@ -26,9 +26,7 @@
 #include <linux/seq_file.h>
 #include <linux/debugfs.h>
 #include <linux/ctype.h>
-#ifdef CONFIG_ACPI_VIDEO
 #include <acpi/video.h>
-#endif
 
 /*
  * This driver is needed because a number of Samsung laptops do not hook
@@ -1558,9 +1556,7 @@ static int __init samsung_init(void)
                samsung->handle_backlight = false;
        } else if (samsung->quirks->broken_acpi_video) {
                pr_info("Disabling ACPI video driver\n");
-#ifdef CONFIG_ACPI_VIDEO
                acpi_video_unregister();
-#endif
        }
 #endif
 
index 80e377949314ba37b3f00f09655290ed062349ed..52daaa816e53691792b6071955dbe680c4721b91 100644 (file)
@@ -545,7 +545,7 @@ TPACPI_HANDLE(hkey, ec, "\\_SB.HKEY",       /* 600e/x, 770e, 770x */
  */
 
 static int acpi_evalf(acpi_handle handle,
-                     void *res, char *method, char *fmt, ...)
+                     int *res, char *method, char *fmt, ...)
 {
        char *fmt0 = fmt;
        struct acpi_object_list params;
@@ -606,7 +606,7 @@ static int acpi_evalf(acpi_handle handle,
                success = (status == AE_OK &&
                           out_obj.type == ACPI_TYPE_INTEGER);
                if (success && res)
-                       *(int *)res = out_obj.integer.value;
+                       *res = out_obj.integer.value;
                break;
        case 'v':               /* void */
                success = status == AE_OK;
@@ -7386,17 +7386,18 @@ static int fan_get_status(u8 *status)
         * Add TPACPI_FAN_RD_ACPI_FANS ? */
 
        switch (fan_status_access_mode) {
-       case TPACPI_FAN_RD_ACPI_GFAN:
+       case TPACPI_FAN_RD_ACPI_GFAN: {
                /* 570, 600e/x, 770e, 770x */
+               int res;
 
-               if (unlikely(!acpi_evalf(gfan_handle, &s, NULL, "d")))
+               if (unlikely(!acpi_evalf(gfan_handle, &res, NULL, "d")))
                        return -EIO;
 
                if (likely(status))
-                       *status = s & 0x07;
+                       *status = res & 0x07;
 
                break;
-
+       }
        case TPACPI_FAN_RD_TPEC:
                /* all except 570, 600e/x, 770e, 770x */
                if (unlikely(!acpi_ec_read(fan_status_offset, &s)))
index 0b66d0f259224f9e7ca679a2759a473708cd3be6..4b6688909fee076c5573e086b972b7b3cf5ebffc 100644 (file)
@@ -100,6 +100,13 @@ static int ecap_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
                writel(period_cycles, pc->mmio_base + CAP3);
        }
 
+       if (!test_bit(PWMF_ENABLED, &pwm->flags)) {
+               reg_val = readw(pc->mmio_base + ECCTL2);
+               /* Disable APWM mode to put APWM output Low */
+               reg_val &= ~ECCTL2_APWM_MODE;
+               writew(reg_val, pc->mmio_base + ECCTL2);
+       }
+
        pm_runtime_put_sync(pc->chip.dev);
        return 0;
 }
index c3756d1be19496fdf3dd163787b7e289e9ad70bd..b1996bcd5b788fd8923ef646b828d71dbc769265 100644 (file)
@@ -104,6 +104,7 @@ struct ehrpwm_pwm_chip {
        struct pwm_chip chip;
        unsigned int    clk_rate;
        void __iomem    *mmio_base;
+       unsigned long period_cycles[NUM_PWM_CHANNEL];
 };
 
 static inline struct ehrpwm_pwm_chip *to_ehrpwm_pwm_chip(struct pwm_chip *chip)
@@ -210,6 +211,7 @@ static int ehrpwm_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
        unsigned long long c;
        unsigned long period_cycles, duty_cycles;
        unsigned short ps_divval, tb_divval;
+       int i;
 
        if (period_ns < 0 || duty_ns < 0 || period_ns > NSEC_PER_SEC)
                return -ERANGE;
@@ -229,6 +231,28 @@ static int ehrpwm_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
                duty_cycles = (unsigned long)c;
        }
 
+       /*
+        * Period values should be same for multiple PWM channels as IP uses
+        * Period values should be the same for all PWM channels, as the IP
+        * uses a single period register shared by the channels.
+       for (i = 0; i < NUM_PWM_CHANNEL; i++) {
+               if (pc->period_cycles[i] &&
+                               (pc->period_cycles[i] != period_cycles)) {
+                       /*
+                        * Allow the channel to reconfigure its period if no
+                        * other channel is already configured.
+                        */
+                       if (i == pwm->hwpwm)
+                               continue;
+
+                       dev_err(chip->dev, "Period value conflicts with channel %d\n",
+                                       i);
+                       return -EINVAL;
+               }
+       }
+
+       pc->period_cycles[pwm->hwpwm] = period_cycles;
+
        /* Configure clock prescaler to support Low frequency PWM wave */
        if (set_prescale_div(period_cycles/PERIOD_MAX, &ps_divval,
                                &tb_divval)) {
@@ -320,10 +344,15 @@ static void ehrpwm_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
 
 static void ehrpwm_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
 {
+       struct ehrpwm_pwm_chip *pc = to_ehrpwm_pwm_chip(chip);
+
        if (test_bit(PWMF_ENABLED, &pwm->flags)) {
                dev_warn(chip->dev, "Removing PWM device without disabling\n");
                pm_runtime_put_sync(chip->dev);
        }
+
+       /* set period value to zero on free */
+       pc->period_cycles[pwm->hwpwm] = 0;
 }
 
 static const struct pwm_ops ehrpwm_pwm_ops = {
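Because the eHRPWM channels share a single period register, the config path above rejects a period that conflicts with another active channel while still letting a channel re-program its own. A self-contained sketch of that check; the channel count and names are illustrative.

#include <stdio.h>

#define NUM_CHANNELS 2

static unsigned long period_cycles[NUM_CHANNELS];   /* 0 = unused */

static int set_period(int ch, unsigned long period)
{
	for (int i = 0; i < NUM_CHANNELS; i++) {
		if (period_cycles[i] && period_cycles[i] != period) {
			/* A channel may always re-program itself. */
			if (i == ch)
				continue;
			fprintf(stderr,
				"period %lu conflicts with channel %d (%lu)\n",
				period, i, period_cycles[i]);
			return -1;
		}
	}
	period_cycles[ch] = period;
	printf("channel %d period set to %lu\n", ch, period);
	return 0;
}

int main(void)
{
	set_period(0, 1000);   /* accepted */
	set_period(1, 1000);   /* accepted: matches the shared period */
	set_period(1, 2000);   /* rejected: channel 0 still uses 1000 */
	return 0;
}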
index 6caa222af77a284f09f1e8a5f634da794dd84838..ab00cab905b730459c6e752907f1139aaec9ee57 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/err.h>
 #include <linux/platform_device.h>
 
+#include <linux/regulator/of_regulator.h>
 #include <linux/regulator/driver.h>
 #include <linux/regulator/machine.h>
 #include <linux/mfd/tps65217.h>
@@ -281,37 +282,130 @@ static const struct regulator_desc regulators[] = {
                           NULL),
 };
 
+#ifdef CONFIG_OF
+static struct of_regulator_match reg_matches[] = {
+       { .name = "dcdc1", .driver_data = (void *)TPS65217_DCDC_1 },
+       { .name = "dcdc2", .driver_data = (void *)TPS65217_DCDC_2 },
+       { .name = "dcdc3", .driver_data = (void *)TPS65217_DCDC_3 },
+       { .name = "ldo1", .driver_data = (void *)TPS65217_LDO_1 },
+       { .name = "ldo2", .driver_data = (void *)TPS65217_LDO_2 },
+       { .name = "ldo3", .driver_data = (void *)TPS65217_LDO_3 },
+       { .name = "ldo4", .driver_data = (void *)TPS65217_LDO_4 },
+};
+
+static struct tps65217_board *tps65217_parse_dt(struct platform_device *pdev)
+{
+       struct tps65217 *tps = dev_get_drvdata(pdev->dev.parent);
+       struct device_node *node = tps->dev->of_node;
+       struct tps65217_board *pdata;
+       struct device_node *regs;
+       int i, count;
+
+       regs = of_find_node_by_name(node, "regulators");
+       if (!regs)
+               return NULL;
+
+       count = of_regulator_match(pdev->dev.parent, regs,
+                               reg_matches, TPS65217_NUM_REGULATOR);
+       of_node_put(regs);
+       if ((count < 0) || (count > TPS65217_NUM_REGULATOR))
+               return NULL;
+
+       pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+       if (!pdata)
+               return NULL;
+
+       for (i = 0; i < count; i++) {
+               if (!reg_matches[i].init_data || !reg_matches[i].of_node)
+                       continue;
+
+               pdata->tps65217_init_data[i] = reg_matches[i].init_data;
+               pdata->of_node[i] = reg_matches[i].of_node;
+       }
+
+       return pdata;
+}
+#else
+static struct tps65217_board *tps65217_parse_dt(struct platform_device *pdev)
+{
+       return NULL;
+}
+#endif
+
 static int __devinit tps65217_regulator_probe(struct platform_device *pdev)
 {
+       struct tps65217 *tps = dev_get_drvdata(pdev->dev.parent);
+       struct tps65217_board *pdata = dev_get_platdata(tps->dev);
+       struct regulator_init_data *reg_data;
        struct regulator_dev *rdev;
-       struct tps65217 *tps;
-       struct tps_info *info = &tps65217_pmic_regs[pdev->id];
        struct regulator_config config = { };
+       int i, ret;
 
-       /* Already set by core driver */
-       tps = dev_to_tps65217(pdev->dev.parent);
-       tps->info[pdev->id] = info;
+       if (tps->dev->of_node)
+               pdata = tps65217_parse_dt(pdev);
 
-       config.dev = &pdev->dev;
-       config.of_node = pdev->dev.of_node;
-       config.init_data = pdev->dev.platform_data;
-       config.driver_data = tps;
+       if (!pdata) {
+               dev_err(&pdev->dev, "Platform data not found\n");
+               return -EINVAL;
+       }
 
-       rdev = regulator_register(&regulators[pdev->id], &config);
-       if (IS_ERR(rdev))
-               return PTR_ERR(rdev);
+       if (tps65217_chip_id(tps) != TPS65217) {
+               dev_err(&pdev->dev, "Invalid tps chip version\n");
+               return -ENODEV;
+       }
 
-       platform_set_drvdata(pdev, rdev);
+       platform_set_drvdata(pdev, tps);
 
+       for (i = 0; i < TPS65217_NUM_REGULATOR; i++) {
+
+               reg_data = pdata->tps65217_init_data[i];
+
+               /*
+                * Regulator API handles empty constraints but not NULL
+                * constraints
+                */
+               if (!reg_data)
+                       continue;
+
+               /* Register the regulators */
+               tps->info[i] = &tps65217_pmic_regs[i];
+
+               config.dev = tps->dev;
+               config.init_data = reg_data;
+               config.driver_data = tps;
+               config.regmap = tps->regmap;
+               if (tps->dev->of_node)
+                       config.of_node = pdata->of_node[i];
+
+               rdev = regulator_register(&regulators[i], &config);
+               if (IS_ERR(rdev)) {
+                       dev_err(tps->dev, "failed to register %s regulator\n",
+                               pdev->name);
+                       ret = PTR_ERR(rdev);
+                       goto err_unregister_regulator;
+               }
+
+               /* Save regulator for cleanup */
+               tps->rdev[i] = rdev;
+       }
        return 0;
+
+err_unregister_regulator:
+       while (--i >= 0)
+               regulator_unregister(tps->rdev[i]);
+
+       return ret;
 }
 
 static int __devexit tps65217_regulator_remove(struct platform_device *pdev)
 {
-       struct regulator_dev *rdev = platform_get_drvdata(pdev);
+       struct tps65217 *tps = platform_get_drvdata(pdev);
+       unsigned int i;
+
+       for (i = 0; i < TPS65217_NUM_REGULATOR; i++)
+               regulator_unregister(tps->rdev[i]);
 
        platform_set_drvdata(pdev, NULL);
-       regulator_unregister(rdev);
 
        return 0;
 }
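The probe above registers the seven regulators in one loop and, on failure, walks the index back down to unregister only the ones that made it. That partial-initialization unwind is a common idiom; a minimal sketch with dummy register/unregister calls (none of these are the real regulator API):

#include <stdio.h>

#define NUM_REGULATORS 7

static int registered[NUM_REGULATORS];

/* Pretend registration; fails at index 4 for the demo. */
static int reg_register(int i)
{
	if (i == 4)
		return -1;
	registered[i] = 1;
	return 0;
}

static void reg_unregister(int i)
{
	registered[i] = 0;
}

static int probe(void)
{
	int i;

	for (i = 0; i < NUM_REGULATORS; i++) {
		if (reg_register(i)) {
			fprintf(stderr, "failed to register regulator %d\n", i);
			goto err_unregister;
		}
	}
	return 0;

err_unregister:
	/* Undo exactly the registrations that succeeded, newest first. */
	while (--i >= 0)
		reg_unregister(i);
	return -1;
}

int main(void)
{
	probe();
	for (int i = 0; i < NUM_REGULATORS; i++)
		printf("regulator %d: %s\n", i,
		       registered[i] ? "registered" : "free");
	return 0;
}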
index 831868904e02fd39b7058de5b4e7a65c9d8555b3..1dd61f402b040441c99078214017f802050c8fa5 100644 (file)
@@ -58,6 +58,7 @@ struct sam9_rtc {
        struct rtc_device       *rtcdev;
        u32                     imr;
        void __iomem            *gpbr;
+       int                     irq;
 };
 
 #define rtt_readl(rtc, field) \
@@ -292,7 +293,7 @@ static int __devinit at91_rtc_probe(struct platform_device *pdev)
 {
        struct resource *r, *r_gpbr;
        struct sam9_rtc *rtc;
-       int             ret;
+       int             ret, irq;
        u32             mr;
 
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -302,10 +303,18 @@ static int __devinit at91_rtc_probe(struct platform_device *pdev)
                return -ENODEV;
        }
 
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0) {
+               dev_err(&pdev->dev, "failed to get interrupt resource\n");
+               return irq;
+       }
+
        rtc = kzalloc(sizeof *rtc, GFP_KERNEL);
        if (!rtc)
                return -ENOMEM;
 
+       rtc->irq = irq;
+
        /* platform setup code should have handled this; sigh */
        if (!device_can_wakeup(&pdev->dev))
                device_init_wakeup(&pdev->dev, 1);
@@ -345,11 +354,10 @@ static int __devinit at91_rtc_probe(struct platform_device *pdev)
        }
 
        /* register irq handler after we know what name we'll use */
-       ret = request_irq(AT91_ID_SYS, at91_rtc_interrupt,
-                               IRQF_SHARED,
+       ret = request_irq(rtc->irq, at91_rtc_interrupt, IRQF_SHARED,
                                dev_name(&rtc->rtcdev->dev), rtc);
        if (ret) {
-               dev_dbg(&pdev->dev, "can't share IRQ %d?\n", AT91_ID_SYS);
+               dev_dbg(&pdev->dev, "can't share IRQ %d?\n", rtc->irq);
                rtc_device_unregister(rtc->rtcdev);
                goto fail_register;
        }
@@ -386,7 +394,7 @@ static int __devexit at91_rtc_remove(struct platform_device *pdev)
 
        /* disable all interrupts */
        rtt_writel(rtc, MR, mr & ~(AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN));
-       free_irq(AT91_ID_SYS, rtc);
+       free_irq(rtc->irq, rtc);
 
        rtc_device_unregister(rtc->rtcdev);
 
@@ -423,7 +431,7 @@ static int at91_rtc_suspend(struct platform_device *pdev,
        rtc->imr = mr & (AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN);
        if (rtc->imr) {
                if (device_may_wakeup(&pdev->dev) && (mr & AT91_RTT_ALMIEN)) {
-                       enable_irq_wake(AT91_ID_SYS);
+                       enable_irq_wake(rtc->irq);
                        /* don't let RTTINC cause wakeups */
                        if (mr & AT91_RTT_RTTINCIEN)
                                rtt_writel(rtc, MR, mr & ~AT91_RTT_RTTINCIEN);
@@ -441,7 +449,7 @@ static int at91_rtc_resume(struct platform_device *pdev)
 
        if (rtc->imr) {
                if (device_may_wakeup(&pdev->dev))
-                       disable_irq_wake(AT91_ID_SYS);
+                       disable_irq_wake(rtc->irq);
                mr = rtt_readl(rtc, MR);
                rtt_writel(rtc, MR, mr | rtc->imr);
        }
index dc27598785e5d781ff9790697f8b3a5960921d85..ed38454228c626bdabbea4d4265b22dfbc637864 100644 (file)
@@ -4066,7 +4066,6 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
        spin_lock_init(&instance->cmd_pool_lock);
        spin_lock_init(&instance->hba_lock);
        spin_lock_init(&instance->completion_lock);
-       spin_lock_init(&poll_aen_lock);
 
        mutex_init(&instance->aen_mutex);
        mutex_init(&instance->reset_mutex);
@@ -5392,6 +5391,8 @@ static int __init megasas_init(void)
        printk(KERN_INFO "megasas: %s %s\n", MEGASAS_VERSION,
               MEGASAS_EXT_VERSION);
 
+       spin_lock_init(&poll_aen_lock);
+
        support_poll_for_event = 2;
        support_device_change = 1;
 
index 9d46fcbe7755fd2aa258e3de91c5a8cd76e3079c..b25757d1e91b5ee8ebf2964d3ee424105939d56b 100644 (file)
@@ -2424,10 +2424,13 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc,  int sleep_flag)
        }
 
        /* command line tunables  for max controller queue depth */
-       if (max_queue_depth != -1)
-               max_request_credit = (max_queue_depth < facts->RequestCredit)
-                   ? max_queue_depth : facts->RequestCredit;
-       else
+       if (max_queue_depth != -1 && max_queue_depth != 0) {
+               max_request_credit = min_t(u16, max_queue_depth +
+                       ioc->hi_priority_depth + ioc->internal_depth,
+                       facts->RequestCredit);
+               if (max_request_credit > MAX_HBA_QUEUE_DEPTH)
+                       max_request_credit =  MAX_HBA_QUEUE_DEPTH;
+       } else
                max_request_credit = min_t(u16, facts->RequestCredit,
                    MAX_HBA_QUEUE_DEPTH);
 
@@ -2502,7 +2505,7 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc,  int sleep_flag)
        /* set the scsi host can_queue depth
         * with some internal commands that could be outstanding
         */
-       ioc->shost->can_queue = ioc->scsiio_depth - (2);
+       ioc->shost->can_queue = ioc->scsiio_depth;
        dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scsi host: "
            "can_queue depth (%d)\n", ioc->name, ioc->shost->can_queue));
 
index 4a6381c87253ec4be307eb47dc8d1ff6ebad4dc3..de2337f255a74ff02888f67e1d202766f257930a 100644 (file)
@@ -42,6 +42,8 @@
 
 #include <trace/events/scsi.h>
 
+static void scsi_eh_done(struct scsi_cmnd *scmd);
+
 #define SENSE_TIMEOUT          (10*HZ)
 
 /*
@@ -241,6 +243,14 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
        if (! scsi_command_normalize_sense(scmd, &sshdr))
                return FAILED;  /* no valid sense data */
 
+       if (scmd->cmnd[0] == TEST_UNIT_READY && scmd->scsi_done != scsi_eh_done)
+               /*
+                * nasty: for mid-layer issued TURs, we need to return the
+                * actual sense data without any recovery attempt.  For eh
+                * issued ones, we need to try to recover and interpret
+                */
+               return SUCCESS;
+
        if (scsi_sense_is_deferred(&sshdr))
                return NEEDS_RETRY;
 
index ffd77739ae3e2bc4bf793fe7b4bb0ed37c3ca8f4..faa790fba1347fc61b0869015e2a28bad4f113f2 100644 (file)
@@ -776,7 +776,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
        }
 
        if (req->cmd_type == REQ_TYPE_BLOCK_PC) { /* SG_IO ioctl from block level */
-               req->errors = result;
                if (result) {
                        if (sense_valid && req->sense) {
                                /*
@@ -792,6 +791,10 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
                        if (!sense_deferred)
                                error = __scsi_error_from_host_byte(cmd, result);
                }
+               /*
+                * __scsi_error_from_host_byte may have reset the host_byte
+                */
+               req->errors = cmd->result;
 
                req->resid_len = scsi_get_resid(cmd);
 
index 56a93794c470ae99d603426f15eaccb1dd3ae727..d947ffc20ceba301eaaf45973fee97dfba7fb7f7 100644 (file)
@@ -764,6 +764,16 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
        sdev->model = (char *) (sdev->inquiry + 16);
        sdev->rev = (char *) (sdev->inquiry + 32);
 
+       if (strncmp(sdev->vendor, "ATA     ", 8) == 0) {
+               /*
+                * sata emulation layer device.  This is a hack to work around
+                * the SATL power management specifications which state that
+                * when the SATL detects the device has gone into standby
+                * mode, it shall respond with NOT READY.
+                */
+               sdev->allow_restart = 1;
+       }
+
        if (*bflags & BLIST_ISROM) {
                sdev->type = TYPE_ROM;
                sdev->removable = 1;
index d0cafd6371996c75e3ee094b2be3168ad14d38b7..f2ffd963f1c348e3986f761b1d2d2b7b75b361e2 100644 (file)
@@ -51,10 +51,12 @@ enum android_alarm_return_flags {
 #define ANDROID_ALARM_WAIT                  _IO('a', 1)
 
 #define ALARM_IOW(c, type, size)            _IOW('a', (c) | ((type) << 4), size)
+#define ALARM_IOR(c, type, size)            _IOR('a', (c) | ((type) << 4), size)
+
 /* Set alarm */
 #define ANDROID_ALARM_SET(type)             ALARM_IOW(2, type, struct timespec)
 #define ANDROID_ALARM_SET_AND_WAIT(type)    ALARM_IOW(3, type, struct timespec)
-#define ANDROID_ALARM_GET_TIME(type)        ALARM_IOW(4, type, struct timespec)
+#define ANDROID_ALARM_GET_TIME(type)        ALARM_IOR(4, type, struct timespec)
 #define ANDROID_ALARM_SET_RTC               _IOW('a', 5, struct timespec)
 #define ANDROID_ALARM_BASE_CMD(cmd)         (cmd & ~(_IOC(0, 0, 0xf0, 0)))
 #define ANDROID_ALARM_IOCTL_TO_TYPE(cmd)    (_IOC_NR(cmd) >> 4)
index 6c81e377262c204ca8a9354743f344ff824267d5..cc8931fde839c491455ed10beaa56529480fbe2d 100644 (file)
@@ -1412,6 +1412,13 @@ static int __devinit dio200_attach_pci(struct comedi_device *dev,
                dev_err(dev->class_dev, "BUG! cannot determine board type!\n");
                return -EINVAL;
        }
+       /*
+        * Need to 'get' the PCI device to match the 'put' in dio200_detach().
+        * TODO: Remove the pci_dev_get() and matching pci_dev_put() once
+        * support for manual attachment of PCI devices via dio200_attach()
+        * has been removed.
+        */
+       pci_dev_get(pci_dev);
        return dio200_pci_common_attach(dev, pci_dev);
 }
 
index aabba9886b7d9276eb1c4233c89ce894685fbd58..f50287903038bb22772c1c99522c69078fc638c0 100644 (file)
@@ -565,6 +565,13 @@ static int __devinit pc236_attach_pci(struct comedi_device *dev,
                dev_err(dev->class_dev, "BUG! cannot determine board type!\n");
                return -EINVAL;
        }
+       /*
+        * Need to 'get' the PCI device to match the 'put' in pc236_detach().
+        * TODO: Remove the pci_dev_get() and matching pci_dev_put() once
+        * support for manual attachment of PCI devices via pc236_attach()
+        * has been removed.
+        */
+       pci_dev_get(pci_dev);
        return pc236_pci_common_attach(dev, pci_dev);
 }
 
index 40ec1ffebba651fda62f23af21ff7b0e3ef097a4..8191c4e28e0a6849fb53e7def7b4fc1639c79c09 100644 (file)
@@ -298,6 +298,13 @@ static int __devinit pc263_attach_pci(struct comedi_device *dev,
                dev_err(dev->class_dev, "BUG! cannot determine board type!\n");
                return -EINVAL;
        }
+       /*
+        * Need to 'get' the PCI device to match the 'put' in pc263_detach().
+        * TODO: Remove the pci_dev_get() and matching pci_dev_put() once
+        * support for manual attachment of PCI devices via pc263_attach()
+        * has been removed.
+        */
+       pci_dev_get(pci_dev);
        return pc263_pci_common_attach(dev, pci_dev);
 }
 
index 4e17f13e57f6530b2bc3cfd1afc4485d0940d880..8bf109e7bb05cef4c9c50669314289cde734c47c 100644 (file)
@@ -1503,6 +1503,13 @@ pci224_attach_pci(struct comedi_device *dev, struct pci_dev *pci_dev)
                        DRIVER_NAME ": BUG! cannot determine board type!\n");
                return -EINVAL;
        }
+       /*
+        * Need to 'get' the PCI device to match the 'put' in pci224_detach().
+        * TODO: Remove the pci_dev_get() and matching pci_dev_put() once
+        * support for manual attachment of PCI devices via pci224_attach()
+        * has been removed.
+        */
+       pci_dev_get(pci_dev);
        return pci224_attach_common(dev, pci_dev, NULL);
 }
 
index 1b67d0c61fa72ff782e02a09dc63b2c14ec5982c..66e74bd12267a565263556f8e6f3389b2c6c41b9 100644 (file)
@@ -2925,6 +2925,13 @@ static int __devinit pci230_attach_pci(struct comedi_device *dev,
                        "amplc_pci230: BUG! cannot determine board type!\n");
                return -EINVAL;
        }
+       /*
+        * Need to 'get' the PCI device to match the 'put' in pci230_detach().
+        * TODO: Remove the pci_dev_get() and matching pci_dev_put() once
+        * support for manual attachment of PCI devices via pci230_attach()
+        * has been removed.
+        */
+       pci_dev_get(pci_dev);
        return pci230_attach_common(dev, pci_dev);
 }
 
index 874e02e47668e60a024b350a72822894be801f1a..67a914a10b55fb2333c9ec747ccd907084fc7b6d 100644 (file)
@@ -378,7 +378,7 @@ das08jr_ao_winsn(struct comedi_device *dev, struct comedi_subdevice *s,
        int chan;
 
        lsb = data[0] & 0xff;
-       msb = (data[0] >> 8) & 0xf;
+       msb = (data[0] >> 8) & 0xff;
 
        chan = CR_CHAN(insn->chanspec);
 
@@ -623,7 +623,7 @@ static const struct das08_board_struct das08_boards[] = {
                .ai = das08_ai_rinsn,
                .ai_nbits = 16,
                .ai_pg = das08_pg_none,
-               .ai_encoding = das08_encode12,
+               .ai_encoding = das08_encode16,
                .ao = das08jr_ao_winsn,
                .ao_nbits = 16,
                .di = das08jr_di_rbits,
@@ -922,6 +922,13 @@ das08_attach_pci(struct comedi_device *dev, struct pci_dev *pdev)
                dev_err(dev->class_dev, "BUG! cannot determine board type!\n");
                return -EINVAL;
        }
+       /*
+        * Need to 'get' the PCI device to match the 'put' in das08_detach().
+        * TODO: Remove the pci_dev_get() and matching pci_dev_put() once
+        * support for manual attachment of PCI devices via das08_attach()
+        * has been removed.
+        */
+       pci_dev_get(pdev);
        return das08_pci_attach_common(dev, pdev);
 }
 
index 18d108fd967a908817efa70d1056fcba35d00458..f3da59063ed2e5a8c934ed201f1add435810028a 100644 (file)
@@ -121,8 +121,10 @@ static int lis3l02dq_get_buffer_element(struct iio_dev *indio_dev,
        if (rx_array == NULL)
                return -ENOMEM;
        ret = lis3l02dq_read_all(indio_dev, rx_array);
-       if (ret < 0)
+       if (ret < 0) {
+               kfree(rx_array);
                return ret;
+       }
        for (i = 0; i < scan_count; i++)
                data[i] = combine_8_to_16(rx_array[i*4+1],
                                        rx_array[i*4+3]);
index 095837285f4fb25732b8986f3d6d25458a9a11a2..19a064d649e3f72a783dd2b2ffc2e6a385175216 100644 (file)
@@ -647,6 +647,8 @@ static ssize_t ad7192_write_frequency(struct device *dev,
        ret = strict_strtoul(buf, 10, &lval);
        if (ret)
                return ret;
+       if (lval == 0)
+               return -EINVAL;
 
        mutex_lock(&indio_dev->mlock);
        if (iio_buffer_enabled(indio_dev)) {
index 93aa431287ac6efb17d9b5172c337f67be30365a..eb8e9d69efd3f39ab42a788f70c8eb1d4b105bf2 100644 (file)
@@ -195,6 +195,8 @@ static ssize_t adis16260_write_frequency(struct device *dev,
        ret = strict_strtol(buf, 10, &val);
        if (ret)
                return ret;
+       if (val == 0)
+               return -EINVAL;
 
        mutex_lock(&indio_dev->mlock);
        if (spi_get_device_id(st->us)) {
index 1f4c17779b5a64e18f48865aa6ecb0e6d49387f4..a618327e06edf2c3374b38b425771b4a823517f2 100644 (file)
@@ -234,6 +234,8 @@ static ssize_t adis16400_write_frequency(struct device *dev,
        ret = strict_strtol(buf, 10, &val);
        if (ret)
                return ret;
+       if (val == 0)
+               return -EINVAL;
 
        mutex_lock(&indio_dev->mlock);
 
index f04ece7fbc2fbe3d33b61dd3387e1cb0b53b23c5..3ccff189f258232cd4704acc2d8674029cf190b1 100644 (file)
@@ -425,6 +425,8 @@ static ssize_t ade7753_write_frequency(struct device *dev,
        ret = strict_strtol(buf, 10, &val);
        if (ret)
                return ret;
+       if (val == 0)
+               return -EINVAL;
 
        mutex_lock(&indio_dev->mlock);
 
index 6cee28a5e87731bee476ab4efc7c0501c89cff45..abb1e9c8d0947adcd1bc6d5567b9620496207c5e 100644 (file)
@@ -445,6 +445,8 @@ static ssize_t ade7754_write_frequency(struct device *dev,
        ret = strict_strtol(buf, 10, &val);
        if (ret)
                return ret;
+       if (val == 0)
+               return -EINVAL;
 
        mutex_lock(&indio_dev->mlock);
 
index b3f7e0fa96124b6ce8d719d63fce6e45df210d0f..eb0a2a98f3886afe1948b284c2ede98094a95754 100644 (file)
@@ -385,6 +385,8 @@ static ssize_t ade7759_write_frequency(struct device *dev,
        ret = strict_strtol(buf, 10, &val);
        if (ret)
                return ret;
+       if (val == 0)
+               return -EINVAL;
 
        mutex_lock(&indio_dev->mlock);
 
index 695ea35f75b0831cc8e2d28369d9ef124f8c2da9..d0a7e408efe93847606d1db21397fab44aba1ae6 100644 (file)
@@ -837,7 +837,7 @@ static int __devinit tegra_nvec_probe(struct platform_device *pdev)
        }
 
        ret = mfd_add_devices(nvec->dev, -1, nvec_devices,
-                             ARRAY_SIZE(nvec_devices), base, 0);
+                             ARRAY_SIZE(nvec_devices), base, 0, NULL);
        if (ret)
                dev_err(nvec->dev, "error adding subdevices\n");
 
index 5e2856c0e0bbf3ead714ca105d06e54e1dff1545..55e9c865585058e9195e3eeef9aa8c554b57c886 100644 (file)
@@ -48,13 +48,20 @@ static inline void copy_timings_omap_to_drm(struct drm_display_mode *mode,
        mode->vsync_end = mode->vsync_start + timings->vsw;
        mode->vtotal = mode->vsync_end + timings->vbp;
 
-       /* note: whether or not it is interlaced, +/- h/vsync, etc,
-        * which should be set in the mode flags, is not exposed in
-        * the omap_video_timings struct.. but hdmi driver tracks
-        * those separately so all we have to have to set the mode
-        * is the way to recover these timings values, and the
-        * omap_dss_driver would do the rest.
-        */
+       mode->flags = 0;
+
+       if (timings->interlace)
+               mode->flags |= DRM_MODE_FLAG_INTERLACE;
+
+       if (timings->hsync_level == OMAPDSS_SIG_ACTIVE_HIGH)
+               mode->flags |= DRM_MODE_FLAG_PHSYNC;
+       else
+               mode->flags |= DRM_MODE_FLAG_NHSYNC;
+
+       if (timings->vsync_level == OMAPDSS_SIG_ACTIVE_HIGH)
+               mode->flags |= DRM_MODE_FLAG_PVSYNC;
+       else
+               mode->flags |= DRM_MODE_FLAG_NVSYNC;
 }
 
 static inline void copy_timings_drm_to_omap(struct omap_video_timings *timings,
@@ -71,6 +78,22 @@ static inline void copy_timings_drm_to_omap(struct omap_video_timings *timings,
        timings->vfp = mode->vsync_start - mode->vdisplay;
        timings->vsw = mode->vsync_end - mode->vsync_start;
        timings->vbp = mode->vtotal - mode->vsync_end;
+
+       timings->interlace = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
+
+       if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+               timings->hsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
+       else
+               timings->hsync_level = OMAPDSS_SIG_ACTIVE_LOW;
+
+       if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+               timings->vsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
+       else
+               timings->vsync_level = OMAPDSS_SIG_ACTIVE_LOW;
+
+       timings->data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
+       timings->de_level = OMAPDSS_SIG_ACTIVE_HIGH;
+       timings->sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES;
 }
 
 static void omap_connector_dpms(struct drm_connector *connector, int mode)
@@ -187,7 +210,7 @@ static int omap_connector_get_modes(struct drm_connector *connector)
                }
        } else {
                struct drm_display_mode *mode = drm_mode_create(dev);
-               struct omap_video_timings timings;
+               struct omap_video_timings timings = {0};
 
                dssdrv->get_timings(dssdev, &timings);
 
@@ -291,7 +314,7 @@ void omap_connector_mode_set(struct drm_connector *connector,
        struct omap_connector *omap_connector = to_omap_connector(connector);
        struct omap_dss_device *dssdev = omap_connector->dssdev;
        struct omap_dss_driver *dssdrv = dssdev->driver;
-       struct omap_video_timings timings;
+       struct omap_video_timings timings = {0};
 
        copy_timings_drm_to_omap(&timings, mode);
 
index d98321945802c8daf8ec71cc3908ec0c44133c7a..758ce0a8d82e03c59d326b5ce0f6b8bdda425b25 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/cdev.h>
 #include <linux/uaccess.h>
 #include <linux/netdevice.h>
+#include <linux/etherdevice.h>
 #include <linux/poll.h>
 #include <linux/sched.h>
 #include "ozconfig.h"
@@ -213,7 +214,7 @@ static int oz_set_active_pd(u8 *addr)
                if (old_pd)
                        oz_pd_put(old_pd);
        } else {
-               if (!memcmp(addr, "\0\0\0\0\0\0", sizeof(addr))) {
+               if (is_zero_ether_addr(addr)) {
                        spin_lock_bh(&g_cdev.lock);
                        pd = g_cdev.active_pd;
                        g_cdev.active_pd = 0;
index 0e26d5f6cf2d57d64eca7d5440af7da0f77443cd..495ee1205e02a9c77aa59a1e7ec5da1cf255d534 100644 (file)
@@ -117,13 +117,8 @@ void r8712_recv_indicatepkt(struct _adapter *padapter,
        if (skb == NULL)
                goto _recv_indicatepkt_drop;
        skb->data = precv_frame->u.hdr.rx_data;
-#ifdef NET_SKBUFF_DATA_USES_OFFSET
-       skb->tail = (sk_buff_data_t)(precv_frame->u.hdr.rx_tail -
-                    precv_frame->u.hdr.rx_head);
-#else
-       skb->tail = (sk_buff_data_t)precv_frame->u.hdr.rx_tail;
-#endif
        skb->len = precv_frame->u.hdr.len;
+       skb_set_tail_pointer(skb, skb->len);
        if ((pattrib->tcpchk_valid == 1) && (pattrib->tcp_chkrpt == 1))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
index e4bdf2a2b5829292e23d8f4675fab023249e16d3..3aa895ec6507f7dde81f1440b4af349e792dc1cd 100644 (file)
@@ -200,7 +200,7 @@ s_vProcessRxMACHeader (
     } else if (!compare_ether_addr(pbyRxBuffer, &pDevice->abySNAP_RFC1042[0])) {
         cbHeaderSize += 6;
         pwType = (PWORD) (pbyRxBufferAddr + cbHeaderSize);
-       if ((*pwType == cpu_to_le16(ETH_P_IPX)) ||
+       if ((*pwType == cpu_to_be16(ETH_P_IPX)) ||
            (*pwType == cpu_to_le16(0xF380))) {
                cbHeaderSize -= 8;
             pwType = (PWORD) (pbyRxBufferAddr + cbHeaderSize);
index bb464527fc1b06e8814b05c23c97e89208cda92e..b6e04e7b629bdc0a63a04ac4f47698abed3cece8 100644 (file)
@@ -1699,7 +1699,7 @@ s_bPacketToWirelessUsb(
     // 802.1H
     if (ntohs(psEthHeader->wType) > ETH_DATA_LEN) {
        if (pDevice->dwDiagRefCount == 0) {
-               if ((psEthHeader->wType == cpu_to_le16(ETH_P_IPX)) ||
+               if ((psEthHeader->wType == cpu_to_be16(ETH_P_IPX)) ||
                    (psEthHeader->wType == cpu_to_le16(0xF380))) {
                        memcpy((PBYTE) (pbyPayloadHead),
                               abySNAP_Bridgetunnel, 6);
@@ -2838,10 +2838,10 @@ int nsDMA_tx_packet(PSDevice pDevice, unsigned int uDMAIdx, struct sk_buff *skb)
     Packet_Type = skb->data[ETH_HLEN+1];
     Descriptor_type = skb->data[ETH_HLEN+1+1+2];
     Key_info = (skb->data[ETH_HLEN+1+1+2+1] << 8)|(skb->data[ETH_HLEN+1+1+2+2]);
-    if (pDevice->sTxEthHeader.wType == cpu_to_le16(ETH_P_PAE)) {
-       /* 802.1x OR eapol-key challenge frame transfer */
-       if (((Protocol_Version == 1) || (Protocol_Version == 2)) &&
-               (Packet_Type == 3)) {
+       if (pDevice->sTxEthHeader.wType == cpu_to_be16(ETH_P_PAE)) {
+               /* 802.1x OR eapol-key challenge frame transfer */
+               if (((Protocol_Version == 1) || (Protocol_Version == 2)) &&
+                       (Packet_Type == 3)) {
                         bTxeapol_key = TRUE;
                        if(!(Key_info & BIT3) &&  //WPA or RSN group-key challenge
                           (Key_info & BIT8) && (Key_info & BIT9)) {    //send 2/2 key
@@ -2987,19 +2987,19 @@ int nsDMA_tx_packet(PSDevice pDevice, unsigned int uDMAIdx, struct sk_buff *skb)
         }
     }
 
-    if (pDevice->sTxEthHeader.wType == cpu_to_le16(ETH_P_PAE)) {
-        if (pDevice->byBBType != BB_TYPE_11A) {
-            pDevice->wCurrentRate = RATE_1M;
-            pDevice->byACKRate = RATE_1M;
-            pDevice->byTopCCKBasicRate = RATE_1M;
-            pDevice->byTopOFDMBasicRate = RATE_6M;
-        } else {
-            pDevice->wCurrentRate = RATE_6M;
-            pDevice->byACKRate = RATE_6M;
-            pDevice->byTopCCKBasicRate = RATE_1M;
-            pDevice->byTopOFDMBasicRate = RATE_6M;
-        }
-    }
+       if (pDevice->sTxEthHeader.wType == cpu_to_be16(ETH_P_PAE)) {
+               if (pDevice->byBBType != BB_TYPE_11A) {
+                       pDevice->wCurrentRate = RATE_1M;
+                       pDevice->byACKRate = RATE_1M;
+                       pDevice->byTopCCKBasicRate = RATE_1M;
+                       pDevice->byTopOFDMBasicRate = RATE_6M;
+               } else {
+                       pDevice->wCurrentRate = RATE_6M;
+                       pDevice->byACKRate = RATE_6M;
+                       pDevice->byTopCCKBasicRate = RATE_1M;
+                       pDevice->byTopOFDMBasicRate = RATE_6M;
+               }
+       }
 
     DBG_PRT(MSG_LEVEL_DEBUG,
            KERN_INFO "dma_tx: pDevice->wCurrentRate = %d\n",
@@ -3015,7 +3015,7 @@ int nsDMA_tx_packet(PSDevice pDevice, unsigned int uDMAIdx, struct sk_buff *skb)
 
     if (bNeedEncryption == TRUE) {
         DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ntohs Pkt Type=%04x\n", ntohs(pDevice->sTxEthHeader.wType));
-       if ((pDevice->sTxEthHeader.wType) == cpu_to_le16(ETH_P_PAE)) {
+       if ((pDevice->sTxEthHeader.wType) == cpu_to_be16(ETH_P_PAE)) {
                bNeedEncryption = FALSE;
             DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Pkt Type=%04x\n", (pDevice->sTxEthHeader.wType));
             if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) && (pMgmt->eCurrState == WMAC_STATE_ASSOC)) {
index fabff4d650ef8c5645ed26dfeb5a46e452d26c05..0970127344e60828e569e1ca87ee6c82cb3f11eb 100644 (file)
@@ -327,9 +327,9 @@ int prism2_get_station(struct wiphy *wiphy, struct net_device *dev,
        return result;
 }
 
-int prism2_scan(struct wiphy *wiphy, struct net_device *dev,
-               struct cfg80211_scan_request *request)
+int prism2_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
 {
+       struct net_device *dev = request->wdev->netdev;
        struct prism2_wiphy_private *priv = wiphy_priv(wiphy);
        wlandevice_t *wlandev = dev->ml_priv;
        struct p80211msg_dot11req_scan msg1;
index c214977b4ab48adec84fc260df0116b12b498aa7..52b43b7b83d7b3c9d7e3acfc853912afc64c2b61 100644 (file)
@@ -1251,13 +1251,12 @@ static int zcache_pampd_get_data_and_free(char *data, size_t *bufsize, bool raw,
                                        void *pampd, struct tmem_pool *pool,
                                        struct tmem_oid *oid, uint32_t index)
 {
-       int ret = 0;
-
        BUG_ON(!is_ephemeral(pool));
-       zbud_decompress((struct page *)(data), pampd);
+       if (zbud_decompress((struct page *)(data), pampd) < 0)
+               return -EINVAL;
        zbud_free_and_delist((struct zbud_hdr *)pampd);
        atomic_dec(&zcache_curr_eph_pampd_count);
-       return ret;
+       return 0;
 }
 
 /*
index 0694d9b1bce6a4e066a7bba1b1ef5f870b4b51d7..6aba4395e8d8ffd5aa950c4dfc745791c3f65fc2 100644 (file)
@@ -221,6 +221,7 @@ static int iscsi_login_zero_tsih_s1(
 {
        struct iscsi_session *sess = NULL;
        struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf;
+       int ret;
 
        sess = kzalloc(sizeof(struct iscsi_session), GFP_KERNEL);
        if (!sess) {
@@ -257,9 +258,17 @@ static int iscsi_login_zero_tsih_s1(
                return -ENOMEM;
        }
        spin_lock(&sess_idr_lock);
-       idr_get_new(&sess_idr, NULL, &sess->session_index);
+       ret = idr_get_new(&sess_idr, NULL, &sess->session_index);
        spin_unlock(&sess_idr_lock);
 
+       if (ret < 0) {
+               pr_err("idr_get_new() for sess_idr failed\n");
+               iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+                               ISCSI_LOGIN_STATUS_NO_RESOURCES);
+               kfree(sess);
+               return -ENOMEM;
+       }
+
        sess->creation_time = get_jiffies_64();
        spin_lock_init(&sess->session_stats_lock);
        /*
index 91799973081a3d907cd260792df3f573d1dbec82..41641ba548286e9dbf33f45a02ece5b20eec0d14 100644 (file)
@@ -218,6 +218,13 @@ int target_emulate_set_target_port_groups(struct se_cmd *cmd)
                cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                return -EINVAL;
        }
+       if (cmd->data_length < 4) {
+               pr_warn("SET TARGET PORT GROUPS parameter list length %u too"
+                       " small\n", cmd->data_length);
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               return -EINVAL;
+       }
+
        buf = transport_kmap_data_sg(cmd);
 
        /*
index cf2c66f3c11690c81ca23e9fd2ce286e76b56e62..9fc9a6006ca082076a6d235dfad676e1e1a11ea9 100644 (file)
@@ -669,6 +669,13 @@ int target_report_luns(struct se_cmd *se_cmd)
        unsigned char *buf;
        u32 lun_count = 0, offset = 8, i;
 
+       if (se_cmd->data_length < 16) {
+               pr_warn("REPORT LUNS allocation length %u too small\n",
+                       se_cmd->data_length);
+               se_cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+               return -EINVAL;
+       }
+
        buf = transport_kmap_data_sg(se_cmd);
        if (!buf)
                return -ENOMEM;
index 76db75e836ede701c2aed6090a212fdf1a08ad10..9ba495477fd24f80bf6643cfae3707525b7c165e 100644 (file)
@@ -325,17 +325,30 @@ static int iblock_execute_unmap(struct se_cmd *cmd)
        struct iblock_dev *ibd = dev->dev_ptr;
        unsigned char *buf, *ptr = NULL;
        sector_t lba;
-       int size = cmd->data_length;
+       int size;
        u32 range;
        int ret = 0;
        int dl, bd_dl;
 
+       if (cmd->data_length < 8) {
+               pr_warn("UNMAP parameter list length %u too small\n",
+                       cmd->data_length);
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               return -EINVAL;
+       }
+
        buf = transport_kmap_data_sg(cmd);
 
        dl = get_unaligned_be16(&buf[0]);
        bd_dl = get_unaligned_be16(&buf[2]);
 
-       size = min(size - 8, bd_dl);
+       size = cmd->data_length - 8;
+       if (bd_dl > size)
+               pr_warn("UNMAP parameter list length %u too small, ignoring bd_dl %u\n",
+                       cmd->data_length, bd_dl);
+       else
+               size = bd_dl;
+
        if (size / 16 > dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
                cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
                ret = -EINVAL;
index 1e946502c378886aa90bafc16239d2c34730b9fb..956c84c6b666498caabf7b60b7404413698c0b14 100644 (file)
@@ -1540,6 +1540,14 @@ static int core_scsi3_decode_spec_i_port(
        tidh_new->dest_local_nexus = 1;
        list_add_tail(&tidh_new->dest_list, &tid_dest_list);
 
+       if (cmd->data_length < 28) {
+               pr_warn("SPC-PR: Received PR OUT parameter list"
+                       " length too small: %u\n", cmd->data_length);
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               ret = -EINVAL;
+               goto out;
+       }
+
        buf = transport_kmap_data_sg(cmd);
        /*
         * For a PERSISTENT RESERVE OUT specify initiator ports payload,
index 5552fa7426bc9b317da906dec2e395bbf854ae78..9d7ce3daa26275a7c08b831ae9b955382e0e9103 100644 (file)
@@ -667,7 +667,8 @@ static void pscsi_free_device(void *p)
        kfree(pdv);
 }
 
-static int pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg)
+static void pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg,
+                                    unsigned char *sense_buffer)
 {
        struct pscsi_dev_virt *pdv = cmd->se_dev->dev_ptr;
        struct scsi_device *sd = pdv->pdv_sd;
@@ -679,7 +680,7 @@ static int pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg)
         * not been allocated because TCM is handling the emulation directly.
         */
        if (!pt)
-               return 0;
+               return;
 
        cdb = &pt->pscsi_cdb[0];
        result = pt->pscsi_result;
@@ -687,11 +688,11 @@ static int pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg)
         * Hack to make sure that Write-Protect modepage is set if R/O mode is
         * forced.
         */
+       if (!cmd->se_deve || !cmd->data_length)
+               goto after_mode_sense;
+
        if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) &&
             (status_byte(result) << 1) == SAM_STAT_GOOD) {
-               if (!cmd->se_deve)
-                       goto after_mode_sense;
-
                if (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) {
                        unsigned char *buf = transport_kmap_data_sg(cmd);
 
@@ -708,7 +709,7 @@ static int pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg)
        }
 after_mode_sense:
 
-       if (sd->type != TYPE_TAPE)
+       if (sd->type != TYPE_TAPE || !cmd->data_length)
                goto after_mode_select;
 
        /*
@@ -750,10 +751,10 @@ after_mode_sense:
        }
 after_mode_select:
 
-       if (status_byte(result) & CHECK_CONDITION)
-               return 1;
-
-       return 0;
+       if (sense_buffer && (status_byte(result) & CHECK_CONDITION)) {
+               memcpy(sense_buffer, pt->pscsi_sense, TRANSPORT_SENSE_BUFFER);
+               cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
+       }
 }
 
 enum {
@@ -1184,13 +1185,6 @@ fail:
        return -ENOMEM;
 }
 
-static unsigned char *pscsi_get_sense_buffer(struct se_cmd *cmd)
-{
-       struct pscsi_plugin_task *pt = cmd->priv;
-
-       return pt->pscsi_sense;
-}
-
 /*     pscsi_get_device_rev():
  *
  *
@@ -1273,7 +1267,6 @@ static struct se_subsystem_api pscsi_template = {
        .check_configfs_dev_params = pscsi_check_configfs_dev_params,
        .set_configfs_dev_params = pscsi_set_configfs_dev_params,
        .show_configfs_dev_params = pscsi_show_configfs_dev_params,
-       .get_sense_buffer       = pscsi_get_sense_buffer,
        .get_device_rev         = pscsi_get_device_rev,
        .get_device_type        = pscsi_get_device_type,
        .get_blocks             = pscsi_get_blocks,
index 4c861de538c9ddb627356e8aa3ea992b164033cf..388a922c8f6de8f4f2a18f7bea85f5b3590b41a9 100644 (file)
@@ -877,9 +877,11 @@ static int spc_emulate_modesense(struct se_cmd *cmd)
 static int spc_emulate_request_sense(struct se_cmd *cmd)
 {
        unsigned char *cdb = cmd->t_task_cdb;
-       unsigned char *buf;
+       unsigned char *rbuf;
        u8 ua_asc = 0, ua_ascq = 0;
-       int err = 0;
+       unsigned char buf[SE_SENSE_BUF];
+
+       memset(buf, 0, SE_SENSE_BUF);
 
        if (cdb[1] & 0x01) {
                pr_err("REQUEST_SENSE description emulation not"
@@ -888,20 +890,21 @@ static int spc_emulate_request_sense(struct se_cmd *cmd)
                return -ENOSYS;
        }
 
-       buf = transport_kmap_data_sg(cmd);
-
-       if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq)) {
+       rbuf = transport_kmap_data_sg(cmd);
+       if (cmd->scsi_sense_reason != 0) {
+               /*
+                * Out of memory.  We will fail with CHECK CONDITION, so
+                * we must not clear the unit attention condition.
+                */
+               target_complete_cmd(cmd, CHECK_CONDITION);
+               return 0;
+       } else if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq)) {
                /*
                 * CURRENT ERROR, UNIT ATTENTION
                 */
                buf[0] = 0x70;
                buf[SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
 
-               if (cmd->data_length < 18) {
-                       buf[7] = 0x00;
-                       err = -EINVAL;
-                       goto end;
-               }
                /*
                 * The Additional Sense Code (ASC) from the UNIT ATTENTION
                 */
@@ -915,11 +918,6 @@ static int spc_emulate_request_sense(struct se_cmd *cmd)
                buf[0] = 0x70;
                buf[SPC_SENSE_KEY_OFFSET] = NO_SENSE;
 
-               if (cmd->data_length < 18) {
-                       buf[7] = 0x00;
-                       err = -EINVAL;
-                       goto end;
-               }
                /*
                 * NO ADDITIONAL SENSE INFORMATION
                 */
@@ -927,8 +925,11 @@ static int spc_emulate_request_sense(struct se_cmd *cmd)
                buf[7] = 0x0A;
        }
 
-end:
-       transport_kunmap_data_sg(cmd);
+       if (rbuf) {
+               memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
+               transport_kunmap_data_sg(cmd);
+       }
+
        target_complete_cmd(cmd, GOOD);
        return 0;
 }
index 4de3186dc44e99672d3666a24ec145e058183691..269f54488397bd2193bb80869bf9de8ac4c73bf2 100644 (file)
@@ -567,6 +567,34 @@ static void target_complete_failure_work(struct work_struct *work)
        transport_generic_request_failure(cmd);
 }
 
+/*
+ * Used when asking transport to copy Sense Data from the underlying
+ * Linux/SCSI struct scsi_cmnd
+ */
+static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd)
+{
+       unsigned char *buffer = cmd->sense_buffer;
+       struct se_device *dev = cmd->se_dev;
+       u32 offset = 0;
+
+       WARN_ON(!cmd->se_lun);
+
+       if (!dev)
+               return NULL;
+
+       if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION)
+               return NULL;
+
+       offset = cmd->se_tfo->set_fabric_sense_len(cmd, TRANSPORT_SENSE_BUFFER);
+
+       /* Automatically padded */
+       cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset;
+
+       pr_debug("HBA_[%u]_PLUG[%s]: Requesting sense for SAM STATUS: 0x%02x\n",
+               dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status);
+       return &buffer[offset];
+}
+
 void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
 {
        struct se_device *dev = cmd->se_dev;
@@ -580,11 +608,11 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
        cmd->transport_state &= ~CMD_T_BUSY;
 
        if (dev && dev->transport->transport_complete) {
-               if (dev->transport->transport_complete(cmd,
-                               cmd->t_data_sg) != 0) {
-                       cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
+               dev->transport->transport_complete(cmd,
+                               cmd->t_data_sg,
+                               transport_get_sense_buffer(cmd));
+               if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
                        success = 1;
-               }
        }
 
        /*
@@ -1181,15 +1209,20 @@ int target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
                        /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
                        goto out_invalid_cdb_field;
                }
-
+               /*
+                * For the overflow case keep the existing fabric provided
+                * ->data_length.  Otherwise for the underflow case, reset
+                * ->data_length to the smaller SCSI expected data transfer
+                * length.
+                */
                if (size > cmd->data_length) {
                        cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
                        cmd->residual_count = (size - cmd->data_length);
                } else {
                        cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
                        cmd->residual_count = (cmd->data_length - size);
+                       cmd->data_length = size;
                }
-               cmd->data_length = size;
        }
 
        return 0;
@@ -1815,61 +1848,6 @@ execute:
 }
 EXPORT_SYMBOL(target_execute_cmd);
 
-/*
- * Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd
- */
-static int transport_get_sense_data(struct se_cmd *cmd)
-{
-       unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL;
-       struct se_device *dev = cmd->se_dev;
-       unsigned long flags;
-       u32 offset = 0;
-
-       WARN_ON(!cmd->se_lun);
-
-       if (!dev)
-               return 0;
-
-       spin_lock_irqsave(&cmd->t_state_lock, flags);
-       if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
-               spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-               return 0;
-       }
-
-       if (!(cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE))
-               goto out;
-
-       if (!dev->transport->get_sense_buffer) {
-               pr_err("dev->transport->get_sense_buffer is NULL\n");
-               goto out;
-       }
-
-       sense_buffer = dev->transport->get_sense_buffer(cmd);
-       if (!sense_buffer) {
-               pr_err("ITT 0x%08x cmd %p: Unable to locate"
-                       " sense buffer for task with sense\n",
-                       cmd->se_tfo->get_task_tag(cmd), cmd);
-               goto out;
-       }
-
-       spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
-       offset = cmd->se_tfo->set_fabric_sense_len(cmd, TRANSPORT_SENSE_BUFFER);
-
-       memcpy(&buffer[offset], sense_buffer, TRANSPORT_SENSE_BUFFER);
-
-       /* Automatically padded */
-       cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset;
-
-       pr_debug("HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x and sense\n",
-               dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status);
-       return 0;
-
-out:
-       spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-       return -1;
-}
-
 /*
  * Process all commands up to the last received ORDERED task attribute which
  * requires another blocking boundary
@@ -1985,7 +1963,7 @@ static void transport_handle_queue_full(
 static void target_complete_ok_work(struct work_struct *work)
 {
        struct se_cmd *cmd = container_of(work, struct se_cmd, work);
-       int reason = 0, ret;
+       int ret;
 
        /*
         * Check if we need to move delayed/dormant tasks from cmds on the
@@ -2002,23 +1980,19 @@ static void target_complete_ok_work(struct work_struct *work)
                schedule_work(&cmd->se_dev->qf_work_queue);
 
        /*
-        * Check if we need to retrieve a sense buffer from
+        * Check if we need to send a sense buffer from
         * the struct se_cmd in question.
         */
        if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
-               if (transport_get_sense_data(cmd) < 0)
-                       reason = TCM_NON_EXISTENT_LUN;
-
-               if (cmd->scsi_status) {
-                       ret = transport_send_check_condition_and_sense(
-                                       cmd, reason, 1);
-                       if (ret == -EAGAIN || ret == -ENOMEM)
-                               goto queue_full;
+               WARN_ON(!cmd->scsi_status);
+               ret = transport_send_check_condition_and_sense(
+                                       cmd, 0, 1);
+               if (ret == -EAGAIN || ret == -ENOMEM)
+                       goto queue_full;
 
-                       transport_lun_remove_cmd(cmd);
-                       transport_cmd_check_stop_to_fabric(cmd);
-                       return;
-               }
+               transport_lun_remove_cmd(cmd);
+               transport_cmd_check_stop_to_fabric(cmd);
+               return;
        }
        /*
         * Check for a callback, used by amongst other things
@@ -2216,7 +2190,6 @@ void *transport_kmap_data_sg(struct se_cmd *cmd)
        struct page **pages;
        int i;
 
-       BUG_ON(!sg);
        /*
         * We need to take into account a possible offset here for fabrics like
         * tcm_loop who may be using a contig buffer from the SCSI midlayer for
@@ -2224,13 +2197,17 @@ void *transport_kmap_data_sg(struct se_cmd *cmd)
         */
        if (!cmd->t_data_nents)
                return NULL;
-       else if (cmd->t_data_nents == 1)
+
+       BUG_ON(!sg);
+       if (cmd->t_data_nents == 1)
                return kmap(sg_page(sg)) + sg->offset;
 
        /* >1 page. use vmap */
        pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL);
-       if (!pages)
+       if (!pages) {
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                return NULL;
+       }
 
        /* convert sg[] to pages[] */
        for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) {
@@ -2239,8 +2216,10 @@ void *transport_kmap_data_sg(struct se_cmd *cmd)
 
        cmd->t_data_vmap = vmap(pages, cmd->t_data_nents,  VM_MAP, PAGE_KERNEL);
        kfree(pages);
-       if (!cmd->t_data_vmap)
+       if (!cmd->t_data_vmap) {
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                return NULL;
+       }
 
        return cmd->t_data_vmap + cmd->t_data_sg[0].offset;
 }
@@ -2326,19 +2305,14 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
         * into the fabric for data transfers, go ahead and complete it right
         * away.
         */
-       if (!cmd->data_length) {
+       if (!cmd->data_length &&
+           cmd->t_task_cdb[0] != REQUEST_SENSE &&
+           cmd->se_dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
                spin_lock_irq(&cmd->t_state_lock);
                cmd->t_state = TRANSPORT_COMPLETE;
                cmd->transport_state |= CMD_T_ACTIVE;
                spin_unlock_irq(&cmd->t_state_lock);
 
-               if (cmd->t_task_cdb[0] == REQUEST_SENSE) {
-                       u8 ua_asc = 0, ua_ascq = 0;
-
-                       core_scsi3_ua_clear_for_request_sense(cmd,
-                                       &ua_asc, &ua_ascq);
-               }
-
                INIT_WORK(&cmd->work, target_complete_ok_work);
                queue_work(target_completion_wq, &cmd->work);
                return 0;
index d5c689d6217e3a2eb46223fa1ffd01b77e15e0df..e309e8b0aaba0c10f0e45d72c57480000ed43d6b 100644 (file)
 #define  UCR4_OREN      (1<<1)  /* Receiver overrun interrupt enable */
 #define  UCR4_DREN      (1<<0)  /* Recv data ready interrupt enable */
 #define  UFCR_RXTL_SHF   0       /* Receiver trigger level shift */
+#define  UFCR_DCEDTE    (1<<6)  /* DCE/DTE mode select */
 #define  UFCR_RFDIV      (7<<7)  /* Reference freq divider mask */
 #define  UFCR_RFDIV_REG(x)     (((x) < 7 ? 6 - (x) : 6) << 7)
 #define  UFCR_TXTL_SHF   10      /* Transmitter trigger level shift */
@@ -667,22 +668,11 @@ static void imx_break_ctl(struct uart_port *port, int break_state)
 static int imx_setup_ufcr(struct imx_port *sport, unsigned int mode)
 {
        unsigned int val;
-       unsigned int ufcr_rfdiv;
-
-       /* set receiver / transmitter trigger level.
-        * RFDIV is set such way to satisfy requested uartclk value
-        */
-       val = TXTL << 10 | RXTL;
-       ufcr_rfdiv = (clk_get_rate(sport->clk_per) + sport->port.uartclk / 2)
-                       / sport->port.uartclk;
-
-       if(!ufcr_rfdiv)
-               ufcr_rfdiv = 1;
-
-       val |= UFCR_RFDIV_REG(ufcr_rfdiv);
 
+       /* set receiver / transmitter trigger level */
+       val = readl(sport->port.membase + UFCR) & (UFCR_RFDIV | UFCR_DCEDTE);
+       val |= TXTL << UFCR_TXTL_SHF | RXTL;
        writel(val, sport->port.membase + UFCR);
-
        return 0;
 }
 
@@ -754,6 +744,7 @@ static int imx_startup(struct uart_port *port)
                }
        }
 
+       spin_lock_irqsave(&sport->port.lock, flags);
        /*
         * Finally, clear and enable interrupts
         */
@@ -807,7 +798,6 @@ static int imx_startup(struct uart_port *port)
        /*
         * Enable modem status interrupts
         */
-       spin_lock_irqsave(&sport->port.lock,flags);
        imx_enable_ms(&sport->port);
        spin_unlock_irqrestore(&sport->port.lock,flags);
 
@@ -837,10 +827,13 @@ static void imx_shutdown(struct uart_port *port)
 {
        struct imx_port *sport = (struct imx_port *)port;
        unsigned long temp;
+       unsigned long flags;
 
+       spin_lock_irqsave(&sport->port.lock, flags);
        temp = readl(sport->port.membase + UCR2);
        temp &= ~(UCR2_TXEN);
        writel(temp, sport->port.membase + UCR2);
+       spin_unlock_irqrestore(&sport->port.lock, flags);
 
        if (USE_IRDA(sport)) {
                struct imxuart_platform_data *pdata;
@@ -869,12 +862,14 @@ static void imx_shutdown(struct uart_port *port)
         * Disable all interrupts, port and break condition.
         */
 
+       spin_lock_irqsave(&sport->port.lock, flags);
        temp = readl(sport->port.membase + UCR1);
        temp &= ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN);
        if (USE_IRDA(sport))
                temp &= ~(UCR1_IREN);
 
        writel(temp, sport->port.membase + UCR1);
+       spin_unlock_irqrestore(&sport->port.lock, flags);
 }
 
 static void
@@ -1217,6 +1212,9 @@ imx_console_write(struct console *co, const char *s, unsigned int count)
        struct imx_port *sport = imx_ports[co->index];
        struct imx_port_ucrs old_ucr;
        unsigned int ucr1;
+       unsigned long flags;
+
+       spin_lock_irqsave(&sport->port.lock, flags);
 
        /*
         *      First, save UCR1/2/3 and then disable interrupts
@@ -1242,6 +1240,8 @@ imx_console_write(struct console *co, const char *s, unsigned int count)
        while (!(readl(sport->port.membase + USR2) & USR2_TXDC));
 
        imx_port_ucrs_restore(&sport->port, &old_ucr);
+
+       spin_unlock_irqrestore(&sport->port.lock, flags);
 }
 
 /*
index c7a032a4f0c54b4aa975e4dbc3dba9ba43cabfc9..d214448b677e68d1eeff7847fea48bb46942f31f 100644 (file)
@@ -78,8 +78,7 @@ static inline int ep_to_bit(struct ci13xxx *ci, int n)
 }
 
 /**
- * hw_device_state: enables/disables interrupts & starts/stops device (execute
- *                  without interruption)
+ * hw_device_state: enables/disables interrupts (execute without interruption)
  * @dma: 0 => disable, !0 => enable and set dma engine
  *
  * This function returns an error code
@@ -91,9 +90,7 @@ static int hw_device_state(struct ci13xxx *ci, u32 dma)
                /* interrupt, error, port change, reset, sleep/suspend */
                hw_write(ci, OP_USBINTR, ~0,
                             USBi_UI|USBi_UEI|USBi_PCI|USBi_URI|USBi_SLI);
-               hw_write(ci, OP_USBCMD, USBCMD_RS, USBCMD_RS);
        } else {
-               hw_write(ci, OP_USBCMD, USBCMD_RS, 0);
                hw_write(ci, OP_USBINTR, ~0, 0);
        }
        return 0;
@@ -774,10 +771,7 @@ __acquires(mEp->lock)
 {
        struct ci13xxx_req *mReq, *mReqTemp;
        struct ci13xxx_ep *mEpTemp = mEp;
-       int uninitialized_var(retval);
-
-       if (list_empty(&mEp->qh.queue))
-               return -EINVAL;
+       int retval = 0;
 
        list_for_each_entry_safe(mReq, mReqTemp, &mEp->qh.queue,
                        queue) {
@@ -1420,6 +1414,21 @@ static int ci13xxx_vbus_draw(struct usb_gadget *_gadget, unsigned mA)
        return -ENOTSUPP;
 }
 
+/* Change Data+ pullup status
+ * This function is used by usb_gadget_connect/disconnect.
+ */
+static int ci13xxx_pullup(struct usb_gadget *_gadget, int is_on)
+{
+       struct ci13xxx *ci = container_of(_gadget, struct ci13xxx, gadget);
+
+       if (is_on)
+               hw_write(ci, OP_USBCMD, USBCMD_RS, USBCMD_RS);
+       else
+               hw_write(ci, OP_USBCMD, USBCMD_RS, 0);
+
+       return 0;
+}
+
 static int ci13xxx_start(struct usb_gadget *gadget,
                         struct usb_gadget_driver *driver);
 static int ci13xxx_stop(struct usb_gadget *gadget,
@@ -1432,6 +1441,7 @@ static int ci13xxx_stop(struct usb_gadget *gadget,
 static const struct usb_gadget_ops usb_gadget_ops = {
        .vbus_session   = ci13xxx_vbus_session,
        .wakeup         = ci13xxx_wakeup,
+       .pullup         = ci13xxx_pullup,
        .vbus_draw      = ci13xxx_vbus_draw,
        .udc_start      = ci13xxx_start,
        .udc_stop       = ci13xxx_stop,
@@ -1455,7 +1465,12 @@ static int init_eps(struct ci13xxx *ci)
 
                        mEp->ep.name      = mEp->name;
                        mEp->ep.ops       = &usb_ep_ops;
-                       mEp->ep.maxpacket = CTRL_PAYLOAD_MAX;
+                       /*
+                        * For ep0, maxpacket is defined by the descriptor;
+                        * for the other endpoints it is set by the
+                        * epautoconfig() call from the gadget layer.
+                        */
+                       mEp->ep.maxpacket = (unsigned short)~0;
 
                        INIT_LIST_HEAD(&mEp->qh.queue);
                        mEp->qh.ptr = dma_pool_alloc(ci->qh_pool, GFP_KERNEL,
@@ -1475,6 +1490,7 @@ static int init_eps(struct ci13xxx *ci)
                                else
                                        ci->ep0in = mEp;
 
+                               mEp->ep.maxpacket = CTRL_PAYLOAD_MAX;
                                continue;
                        }
 
@@ -1484,6 +1500,17 @@ static int init_eps(struct ci13xxx *ci)
        return retval;
 }
 
+static void destroy_eps(struct ci13xxx *ci)
+{
+       int i;
+
+       for (i = 0; i < ci->hw_ep_max; i++) {
+               struct ci13xxx_ep *mEp = &ci->ci13xxx_ep[i];
+
+               dma_pool_free(ci->qh_pool, mEp->qh.ptr, mEp->qh.dma);
+       }
+}
+
 /**
  * ci13xxx_start: register a gadget driver
  * @gadget: our gadget
@@ -1691,7 +1718,7 @@ static int udc_start(struct ci13xxx *ci)
        if (ci->platdata->flags & CI13XXX_REQUIRE_TRANSCEIVER) {
                if (ci->transceiver == NULL) {
                        retval = -ENODEV;
-                       goto free_pools;
+                       goto destroy_eps;
                }
        }
 
@@ -1729,7 +1756,7 @@ static int udc_start(struct ci13xxx *ci)
 
 remove_trans:
        if (!IS_ERR_OR_NULL(ci->transceiver)) {
-               otg_set_peripheral(ci->transceiver->otg, &ci->gadget);
+               otg_set_peripheral(ci->transceiver->otg, NULL);
                if (ci->global_phy)
                        usb_put_phy(ci->transceiver);
        }
@@ -1742,6 +1769,8 @@ unreg_device:
 put_transceiver:
        if (!IS_ERR_OR_NULL(ci->transceiver) && ci->global_phy)
                usb_put_phy(ci->transceiver);
+destroy_eps:
+       destroy_eps(ci);
 free_pools:
        dma_pool_destroy(ci->td_pool);
 free_qh_pool:
@@ -1756,18 +1785,12 @@ free_qh_pool:
  */
 static void udc_stop(struct ci13xxx *ci)
 {
-       int i;
-
        if (ci == NULL)
                return;
 
        usb_del_gadget_udc(&ci->gadget);
 
-       for (i = 0; i < ci->hw_ep_max; i++) {
-               struct ci13xxx_ep *mEp = &ci->ci13xxx_ep[i];
-
-               dma_pool_free(ci->qh_pool, mEp->qh.ptr, mEp->qh.dma);
-       }
+       destroy_eps(ci);
 
        dma_pool_destroy(ci->td_pool);
        dma_pool_destroy(ci->qh_pool);
index 65a55abb791f53dd458f0b23c77af5497142ec6d..5f0cb417b736bb4b61c73afc52672029af59cbda 100644 (file)
@@ -109,12 +109,14 @@ static struct usb_driver wdm_driver;
 /* return intfdata if we own the interface, else look up intf in the list */
 static struct wdm_device *wdm_find_device(struct usb_interface *intf)
 {
-       struct wdm_device *desc = NULL;
+       struct wdm_device *desc;
 
        spin_lock(&wdm_device_list_lock);
        list_for_each_entry(desc, &wdm_device_list, device_list)
                if (desc->intf == intf)
-                       break;
+                       goto found;
+       desc = NULL;
+found:
        spin_unlock(&wdm_device_list_lock);
 
        return desc;
@@ -122,12 +124,14 @@ static struct wdm_device *wdm_find_device(struct usb_interface *intf)
 
 static struct wdm_device *wdm_find_device_by_minor(int minor)
 {
-       struct wdm_device *desc = NULL;
+       struct wdm_device *desc;
 
        spin_lock(&wdm_device_list_lock);
        list_for_each_entry(desc, &wdm_device_list, device_list)
                if (desc->intf->minor == minor)
-                       break;
+                       goto found;
+       desc = NULL;
+found:
        spin_unlock(&wdm_device_list_lock);
 
        return desc;
index f15501f4c585694c0c513eac78f692558ae8720f..e77a8e8eaa233b9b4febd38ef8c01ace8ac832a5 100644 (file)
@@ -71,6 +71,10 @@ static const struct usb_device_id usb_quirk_list[] = {
        { USB_DEVICE(0x04b4, 0x0526), .driver_info =
                        USB_QUIRK_CONFIG_INTF_STRINGS },
 
+       /* Microchip Joss Optical infrared touchboard device */
+       { USB_DEVICE(0x04d8, 0x000c), .driver_info =
+                       USB_QUIRK_CONFIG_INTF_STRINGS },
+
        /* Samsung Android phone modem - ID conflict with SPH-I500 */
        { USB_DEVICE(0x04e8, 0x6601), .driver_info =
                        USB_QUIRK_CONFIG_INTF_STRINGS },
index c34452a7304f9dec269ba64706ab999559e67eff..a68ff53124dc15e88ab3ceaa35e3fbf7e97cc978 100644 (file)
@@ -436,16 +436,21 @@ static int __devinit dwc3_probe(struct platform_device *pdev)
                dev_err(dev, "missing IRQ\n");
                return -ENODEV;
        }
-       dwc->xhci_resources[1] = *res;
+       dwc->xhci_resources[1].start = res->start;
+       dwc->xhci_resources[1].end = res->end;
+       dwc->xhci_resources[1].flags = res->flags;
+       dwc->xhci_resources[1].name = res->name;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
                dev_err(dev, "missing memory resource\n");
                return -ENODEV;
        }
-       dwc->xhci_resources[0] = *res;
+       dwc->xhci_resources[0].start = res->start;
        dwc->xhci_resources[0].end = dwc->xhci_resources[0].start +
                                        DWC3_XHCI_REGS_END;
+       dwc->xhci_resources[0].flags = res->flags;
+       dwc->xhci_resources[0].name = res->name;
 
         /*
          * Request memory region but exclude xHCI regs,
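
The hunk above replaces a wholesale struct assignment of the platform resources with a field-by-field copy of start/end/flags/name, presumably so that the resource-tree linkage a full copy would also bring along (the parent/sibling/child pointers) never leaks into the locally owned xhci_resources[] array. A minimal sketch of that copy, under the same assumption:

#include <linux/ioport.h>

/*
 * Copy only the address range and identification fields of a resource,
 * leaving the destination's tree linkage (parent/sibling/child) alone.
 * Hypothetical helper, for illustration only.
 */
static void copy_resource_range(struct resource *dst, const struct resource *src)
{
        dst->start = src->start;
        dst->end   = src->end;
        dst->flags = src->flags;
        dst->name  = src->name;
}
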
index 9b94886b66e589ee3040556bf284985a01a64bc6..e4d5ca86b9da5413d1c3c52079b4a2abcd9481ad 100644 (file)
@@ -720,7 +720,6 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc,
                transferred = min_t(u32, ur->length,
                                transfer_size - length);
                memcpy(ur->buf, dwc->ep0_bounce, transferred);
-               dwc->ep0_bounced = false;
        } else {
                transferred = ur->length - length;
        }
index 58fdfad96b4d61b2cc86de9763a135dc81fcd908..c2813c2b005a8e223f93ff50a3215e24172904c2 100644 (file)
@@ -263,8 +263,11 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
        if (req->request.status == -EINPROGRESS)
                req->request.status = status;
 
-       usb_gadget_unmap_request(&dwc->gadget, &req->request,
-                       req->direction);
+       if (dwc->ep0_bounced && dep->number == 0)
+               dwc->ep0_bounced = false;
+       else
+               usb_gadget_unmap_request(&dwc->gadget, &req->request,
+                               req->direction);
 
        dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n",
                        req, dep->name, req->request.actual,
@@ -1026,6 +1029,7 @@ static void __dwc3_gadget_start_isoc(struct dwc3 *dwc,
        if (list_empty(&dep->request_list)) {
                dev_vdbg(dwc->dev, "ISOC ep %s run out for requests.\n",
                        dep->name);
+               dep->flags |= DWC3_EP_PENDING_REQUEST;
                return;
        }
 
@@ -1089,6 +1093,17 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
        if (dep->flags & DWC3_EP_PENDING_REQUEST) {
                int     ret;
 
+               /*
+                * If XferNotReady has already elapsed and this is an
+                * isochronous transfer, issue END TRANSFER so that we
+                * receive XferNotReady again and regain a notion of the
+                * current microframe.
+                */
+               if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
+                       dwc3_stop_active_transfer(dwc, dep->number);
+                       return 0;
+               }
+
                ret = __dwc3_gadget_kick_transfer(dep, 0, true);
                if (ret && ret != -EBUSY) {
                        struct dwc3     *dwc = dep->dwc;
index c9e66dfb02e6642c1e88149884857948b44869e8..1e35963bd4edc29f735319a76e0a4eb7052b27f5 100644 (file)
@@ -475,8 +475,7 @@ static int at91_ep_enable(struct usb_ep *_ep,
        unsigned long   flags;
 
        if (!_ep || !ep
-                       || !desc || ep->ep.desc
-                       || _ep->name == ep0name
+                       || !desc || _ep->name == ep0name
                        || desc->bDescriptorType != USB_DT_ENDPOINT
                        || (maxpacket = usb_endpoint_maxp(desc)) == 0
                        || maxpacket > ep->maxpacket) {
@@ -530,7 +529,6 @@ ok:
        tmp |= AT91_UDP_EPEDS;
        __raw_writel(tmp, ep->creg);
 
-       ep->ep.desc = desc;
        ep->ep.maxpacket = maxpacket;
 
        /*
@@ -1635,7 +1633,6 @@ static int at91_start(struct usb_gadget *gadget,
        udc->driver = driver;
        udc->gadget.dev.driver = &driver->driver;
        udc->gadget.dev.of_node = udc->pdev->dev.of_node;
-       dev_set_drvdata(&udc->gadget.dev, &driver->driver);
        udc->enabled = 1;
        udc->selfpowered = 1;
 
@@ -1656,7 +1653,6 @@ static int at91_stop(struct usb_gadget *gadget,
        spin_unlock_irqrestore(&udc->lock, flags);
 
        udc->gadget.dev.driver = NULL;
-       dev_set_drvdata(&udc->gadget.dev, NULL);
        udc->driver = NULL;
 
        DBG("unbound from %s\n", driver->driver.name);
index b799106027adfc5d75a45244a47fbd015bedfd20..afdbb1cbf5d94d972c52f57f4099a1bb0202bef5 100644 (file)
@@ -1916,6 +1916,27 @@ done:
        return retval;
 }
 
+/* usb 3.0 root hub device descriptor */
+struct {
+       struct usb_bos_descriptor bos;
+       struct usb_ss_cap_descriptor ss_cap;
+} __packed usb3_bos_desc = {
+
+       .bos = {
+               .bLength                = USB_DT_BOS_SIZE,
+               .bDescriptorType        = USB_DT_BOS,
+               .wTotalLength           = cpu_to_le16(sizeof(usb3_bos_desc)),
+               .bNumDeviceCaps         = 1,
+       },
+       .ss_cap = {
+               .bLength                = USB_DT_USB_SS_CAP_SIZE,
+               .bDescriptorType        = USB_DT_DEVICE_CAPABILITY,
+               .bDevCapabilityType     = USB_SS_CAP_TYPE,
+               .wSpeedSupported        = cpu_to_le16(USB_5GBPS_OPERATION),
+               .bFunctionalitySupport  = ilog2(USB_5GBPS_OPERATION),
+       },
+};
+
 static inline void
 ss_hub_descriptor(struct usb_hub_descriptor *desc)
 {
@@ -2006,6 +2027,18 @@ static int dummy_hub_control(
                else
                        hub_descriptor((struct usb_hub_descriptor *) buf);
                break;
+
+       case DeviceRequest | USB_REQ_GET_DESCRIPTOR:
+               if (hcd->speed != HCD_USB3)
+                       goto error;
+
+               if ((wValue >> 8) != USB_DT_BOS)
+                       goto error;
+
+               memcpy(buf, &usb3_bos_desc, sizeof(usb3_bos_desc));
+               retval = sizeof(usb3_bos_desc);
+               break;
+
        case GetHubStatus:
                *(__le32 *) buf = cpu_to_le32(0);
                break;
@@ -2503,10 +2536,8 @@ static int dummy_hcd_probe(struct platform_device *pdev)
        hs_hcd->has_tt = 1;
 
        retval = usb_add_hcd(hs_hcd, 0, 0);
-       if (retval != 0) {
-               usb_put_hcd(hs_hcd);
-               return retval;
-       }
+       if (retval)
+               goto put_usb2_hcd;
 
        if (mod_data.is_super_speed) {
                ss_hcd = usb_create_shared_hcd(&dummy_hcd, &pdev->dev,
@@ -2525,6 +2556,8 @@ static int dummy_hcd_probe(struct platform_device *pdev)
 put_usb3_hcd:
        usb_put_hcd(ss_hcd);
 dealloc_usb2_hcd:
+       usb_remove_hcd(hs_hcd);
+put_usb2_hcd:
        usb_put_hcd(hs_hcd);
        the_controller.hs_hcd = the_controller.ss_hcd = NULL;
        return retval;
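
The relabelled error path above follows the usual kernel unwinding idiom: each later failure jumps to a label that undoes exactly the steps completed so far, in reverse order, so usb_remove_hcd() now pairs with the earlier usb_add_hcd() before the final usb_put_hcd(). A minimal sketch of the idiom with made-up step names:

int setup_a(void);
int setup_b(void);
int setup_c(void);
void teardown_a(void);
void teardown_b(void);

static int example_probe(void)
{
        int ret;

        ret = setup_a();
        if (ret)
                return ret;             /* nothing to undo yet */

        ret = setup_b();
        if (ret)
                goto undo_a;

        ret = setup_c();
        if (ret)
                goto undo_b;

        return 0;

undo_b:
        teardown_b();
undo_a:
        teardown_a();
        return ret;
}
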
index 8adc79d1b40277ac15092a53bac28749627c60cf..829aba75a6dfef28f1ce79055f5b73d6df883c68 100644 (file)
 /* Debugging ****************************************************************/
 
 #ifdef VERBOSE_DEBUG
+#ifndef pr_vdebug
 #  define pr_vdebug pr_debug
+#endif /* pr_vdebug */
 #  define ffs_dump_mem(prefix, ptr, len) \
        print_hex_dump_bytes(pr_fmt(prefix ": "), DUMP_PREFIX_NONE, ptr, len)
 #else
+#ifndef pr_vdebug
 #  define pr_vdebug(...)                 do { } while (0)
+#endif /* pr_vdebug */
 #  define ffs_dump_mem(prefix, ptr, len) do { } while (0)
 #endif /* VERBOSE_DEBUG */
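
The #ifndef guards above let a source file (or an earlier header) supply its own pr_vdebug without tripping a macro-redefinition warning, while everyone else still gets the fallback. A minimal sketch of the same guard in a hypothetical shared header:

/* debug_compat.h -- hypothetical helper header, for illustration only */
#ifndef DEBUG_COMPAT_H
#define DEBUG_COMPAT_H

#include <linux/printk.h>

/* Only provide the fallback if the includer has not defined its own. */
#ifndef pr_vdebug
# ifdef VERBOSE_DEBUG
#  define pr_vdebug(fmt, ...)   pr_debug(fmt, ##__VA_ARGS__)
# else
#  define pr_vdebug(fmt, ...)   do { } while (0)
# endif
#endif /* pr_vdebug */

#endif /* DEBUG_COMPAT_H */
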
 
index b13e0bb5f5b8131de7cc26a928449215f2e0334c..0bb617e1dda2e0ecf2a440b6494761338f15004c 100644 (file)
@@ -3599,6 +3599,7 @@ static int __devinit s3c_hsotg_probe(struct platform_device *pdev)
 
        if (hsotg->num_of_eps == 0) {
                dev_err(dev, "wrong number of EPs (zero)\n");
+               ret = -EINVAL;
                goto err_supplies;
        }
 
@@ -3606,6 +3607,7 @@ static int __devinit s3c_hsotg_probe(struct platform_device *pdev)
                      GFP_KERNEL);
        if (!eps) {
                dev_err(dev, "cannot get memory\n");
+               ret = -ENOMEM;
                goto err_supplies;
        }
 
@@ -3622,6 +3624,7 @@ static int __devinit s3c_hsotg_probe(struct platform_device *pdev)
                                                     GFP_KERNEL);
        if (!hsotg->ctrl_req) {
                dev_err(dev, "failed to allocate ctrl req\n");
+               ret = -ENOMEM;
                goto err_ep_mem;
        }
 
index 5b3f5fffea92d241b50587b885c24bf69f83e878..da6d479ff9a61e5f57950be3b78e052a029eaa32 100644 (file)
@@ -132,11 +132,15 @@ static unsigned   n_ports;
 
 
 #ifdef VERBOSE_DEBUG
+#ifndef pr_vdebug
 #define pr_vdebug(fmt, arg...) \
        pr_debug(fmt, ##arg)
+#endif /* pr_vdebug */
 #else
+#ifndef pr_vdebug
 #define pr_vdebug(fmt, arg...) \
        ({ if (0) pr_debug(fmt, ##arg); })
+#endif /* pr_vdebug */
 #endif
 
 /*-------------------------------------------------------------------------*/
index 9bc39ca460c80bdfe275b313a47f4683a81e4f3c..4b66374bdc8e33f74e20ff7bc30b7894ab24698e 100644 (file)
@@ -128,9 +128,17 @@ qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh)
        else {
                qtd = list_entry (qh->qtd_list.next,
                                struct ehci_qtd, qtd_list);
-               /* first qtd may already be partially processed */
-               if (cpu_to_hc32(ehci, qtd->qtd_dma) == qh->hw->hw_current)
+               /*
+                * first qtd may already be partially processed.
+                * If we come here during unlink, the QH overlay region
+                * might still have a reference to the just-unlinked qtd.
+                * The qtd is updated in qh_completions(). Update the QH
+                * overlay here.
+                */
+               if (cpu_to_hc32(ehci, qtd->qtd_dma) == qh->hw->hw_current) {
+                       qh->hw->hw_qtd_next = qtd->hw_next;
                        qtd = NULL;
+               }
        }
 
        if (qtd)
index a665b3eaa74672f28e7d011b1ea4da265667187f..aaa8d2bce21702aa8d7844bb343de68bd30f0a3d 100644 (file)
@@ -570,6 +570,16 @@ static int __devinit ohci_hcd_at91_drv_probe(struct platform_device *pdev)
 
        if (pdata) {
                at91_for_each_port(i) {
+                       /*
+                        * do not configure the PIO lines for ports that
+                        * do not correspond to a real USB port on the board
+                        */
+                       if (i >= pdata->ports) {
+                               pdata->vbus_pin[i] = -EINVAL;
+                               pdata->overcurrent_pin[i] = -EINVAL;
+                               break;
+                       }
+
                        if (!gpio_is_valid(pdata->vbus_pin[i]))
                                continue;
                        gpio = pdata->vbus_pin[i];
index c5e9e4a76f148d4eed0c4785cf46fb38d143a074..966d1484ee79a2db8c5e78e135c5e0760814d183 100644 (file)
@@ -75,7 +75,9 @@
 #define        NB_PIF0_PWRDOWN_1       0x01100013
 
 #define USB_INTEL_XUSB2PR      0xD0
+#define USB_INTEL_USB2PRM      0xD4
 #define USB_INTEL_USB3_PSSEN   0xD8
+#define USB_INTEL_USB3PRM      0xDC
 
 static struct amd_chipset_info {
        struct pci_dev  *nb_dev;
@@ -772,10 +774,18 @@ void usb_enable_xhci_ports(struct pci_dev *xhci_pdev)
                return;
        }
 
-       ports_available = 0xffffffff;
+       /* Read USB3PRM, the USB 3.0 Port Routing Mask Register,
+        * which indicates the ports that can be switched over by the OS.
+        */
+       pci_read_config_dword(xhci_pdev, USB_INTEL_USB3PRM,
+                       &ports_available);
+
+       dev_dbg(&xhci_pdev->dev, "Configurable ports to enable SuperSpeed: 0x%x\n",
+                       ports_available);
+
        /* Write USB3_PSSEN, the USB 3.0 Port SuperSpeed Enable
-        * Register, to turn on SuperSpeed terminations for all
-        * available ports.
+        * Register, to turn on SuperSpeed terminations for the
+        * switchable ports.
         */
        pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
                        cpu_to_le32(ports_available));
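
The hunk above stops enabling SuperSpeed terminations on every port (0xffffffff) and instead enables only the ports the firmware marked as OS-switchable in USB3PRM. Reduced to its core, the read-the-mask-then-write-the-enable pattern over PCI config space looks like this (a sketch using the register offsets defined earlier in this file):

#include <linux/pci.h>

#define USB_INTEL_USB3_PSSEN    0xD8
#define USB_INTEL_USB3PRM       0xDC

static void enable_switchable_usb3_ports(struct pci_dev *xhci_pdev)
{
        u32 ports;

        /* Which SuperSpeed ports is the OS allowed to switch over? */
        pci_read_config_dword(xhci_pdev, USB_INTEL_USB3PRM, &ports);

        /* Turn on SuperSpeed terminations only for those ports. */
        pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN, ports);
}
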
@@ -785,7 +795,16 @@ void usb_enable_xhci_ports(struct pci_dev *xhci_pdev)
        dev_dbg(&xhci_pdev->dev, "USB 3.0 ports that are now enabled "
                        "under xHCI: 0x%x\n", ports_available);
 
-       ports_available = 0xffffffff;
+       /* Read XUSB2PRM, the xHCI USB 2.0 Port Routing Mask Register,
+        * which indicates the USB 2.0 ports to be controlled by the xHCI host.
+        */
+
+       pci_read_config_dword(xhci_pdev, USB_INTEL_USB2PRM,
+                       &ports_available);
+
+       dev_dbg(&xhci_pdev->dev, "Configurable USB 2.0 ports to hand over to xHCI: 0x%x\n",
+                       ports_available);
+
        /* Write XUSB2PR, the xHC USB 2.0 Port Routing Register, to
         * switch the USB 2.0 power and data lines over to the xHCI
         * host.
@@ -822,12 +841,12 @@ static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev)
        void __iomem *op_reg_base;
        u32 val;
        int timeout;
+       int len = pci_resource_len(pdev, 0);
 
        if (!mmio_resource_enabled(pdev, 0))
                return;
 
-       base = ioremap_nocache(pci_resource_start(pdev, 0),
-                               pci_resource_len(pdev, 0));
+       base = ioremap_nocache(pci_resource_start(pdev, 0), len);
        if (base == NULL)
                return;
 
@@ -837,9 +856,17 @@ static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev)
         */
        ext_cap_offset = xhci_find_next_cap_offset(base, XHCI_HCC_PARAMS_OFFSET);
        do {
+               if ((ext_cap_offset + sizeof(val)) > len) {
+                       /* We're reading garbage from the controller */
+                       dev_warn(&pdev->dev,
+                                "xHCI controller failing to respond");
+                       return;
+               }
+
                if (!ext_cap_offset)
                        /* We've reached the end of the extended capabilities */
                        goto hc_init;
+
                val = readl(base + ext_cap_offset);
                if (XHCI_EXT_CAPS_ID(val) == XHCI_EXT_CAPS_LEGACY)
                        break;
@@ -870,9 +897,10 @@ static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev)
        /* Disable any BIOS SMIs and clear all SMI events*/
        writel(val, base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
 
+hc_init:
        if (usb_is_intel_switchable_xhci(pdev))
                usb_enable_xhci_ports(pdev);
-hc_init:
+
        op_reg_base = base + XHCI_HC_LENGTH(readl(base));
 
        /* Wait for the host controller to be ready before writing any
index ef004a5de20f176c27801f83bd9ae2456570e6b1..7f69a39163ce3b5560f9e0e24a0a9d86e7f77cef 100644 (file)
@@ -15,6 +15,7 @@ void usb_disable_xhci_ports(struct pci_dev *xhci_pdev);
 static inline void usb_amd_quirk_pll_disable(void) {}
 static inline void usb_amd_quirk_pll_enable(void) {}
 static inline void usb_amd_dev_put(void) {}
+static inline void usb_disable_xhci_ports(struct pci_dev *xhci_pdev) {}
 #endif  /* CONFIG_PCI */
 
 #endif  /*  __LINUX_USB_PCI_QUIRKS_H  */
index 74bfc868b7ade609dc67cea66195bd499f92b067..d5eb357aa5c42cff5575bdf1297f3ebbafa49d85 100644 (file)
@@ -493,11 +493,48 @@ static void xhci_hub_report_link_state(u32 *status, u32 status_reg)
                 * when this bit is set.
                 */
                pls |= USB_PORT_STAT_CONNECTION;
+       } else {
+               /*
+                * If the CAS bit isn't set but the port is already in
+                * Compliance Mode, fake a connection so the USB core
+                * notices the Compliance state and resets the port.
+                * This works around an issue seen with the SN65LVPE502CP,
+                * where the port sometimes enters compliance mode because
+                * of a delay in the host-device link negotiation.
+                */
+               if (pls == USB_SS_PORT_LS_COMP_MOD)
+                       pls |= USB_PORT_STAT_CONNECTION;
        }
+
        /* update status field */
        *status |= pls;
 }
 
+/*
+ * Function for Compliance Mode Quirk.
+ *
+ * This function checks whether all xHC USB3 ports have entered U0; if so,
+ * the compliance mode timer is deleted. A port is not expected to enter
+ * compliance mode again once it has previously reached U0.
+ */
+void xhci_del_comp_mod_timer(struct xhci_hcd *xhci, u32 status, u16 wIndex)
+{
+       u32 all_ports_seen_u0 = ((1 << xhci->num_usb3_ports)-1);
+       bool port_in_u0 = ((status & PORT_PLS_MASK) == XDEV_U0);
+
+       if (!(xhci->quirks & XHCI_COMP_MODE_QUIRK))
+               return;
+
+       if ((xhci->port_status_u0 != all_ports_seen_u0) && port_in_u0) {
+               xhci->port_status_u0 |= 1 << wIndex;
+               if (xhci->port_status_u0 == all_ports_seen_u0) {
+                       del_timer_sync(&xhci->comp_mode_recovery_timer);
+                       xhci_dbg(xhci, "All USB3 ports have entered U0 already!\n");
+                       xhci_dbg(xhci, "Compliance Mode Recovery Timer Deleted.\n");
+               }
+       }
+}
+
 int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
                u16 wIndex, char *buf, u16 wLength)
 {
@@ -651,6 +688,11 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
                /* Update Port Link State for super speed ports*/
                if (hcd->speed == HCD_USB3) {
                        xhci_hub_report_link_state(&status, temp);
+                       /*
+                        * Verify if all USB3 Ports Have entered U0 already.
+                        * Delete Compliance Mode Timer if so.
+                        */
+                       xhci_del_comp_mod_timer(xhci, temp, wIndex);
                }
                if (bus_state->port_c_suspend & (1 << wIndex))
                        status |= 1 << USB_PORT_FEAT_C_SUSPEND;
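
xhci_del_comp_mod_timer() above keeps one bit per USB3 port in port_status_u0; once the accumulated mask equals (1 << num_usb3_ports) - 1, every port has been seen in U0 and the recovery timer can be dropped. A tiny self-contained sketch of that bookkeeping (hypothetical names, assumes fewer than 32 ports):

#include <stdbool.h>
#include <stdint.h>

struct port_tracker {
        unsigned int num_ports;         /* must be < 32 for this sketch */
        uint32_t     seen_mask;         /* bit i set once port i reached U0 */
};

static bool mark_port_seen(struct port_tracker *t, unsigned int port)
{
        uint32_t all = (1u << t->num_ports) - 1;

        t->seen_mask |= 1u << port;
        return t->seen_mask == all;     /* true once every port has been seen */
}
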
index 689bc18b051d6c1d1ae8e381e887a01edbdfc9e9..df90fe51b4aa2d406b8b2f8bfa591c410df89a35 100644 (file)
@@ -118,7 +118,7 @@ static int xhci_plat_probe(struct platform_device *pdev)
                goto put_hcd;
        }
 
-       hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
+       hcd->regs = ioremap_nocache(hcd->rsrc_start, hcd->rsrc_len);
        if (!hcd->regs) {
                dev_dbg(&pdev->dev, "error mapping memory\n");
                ret = -EFAULT;
index c59d5b5b6c7d227a8005ca520e8b9badad5207b1..6ece0ed288d4da9398bb96ecc8223f31b6ce8e0c 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/slab.h>
+#include <linux/dmi.h>
 
 #include "xhci.h"
 
@@ -398,6 +399,95 @@ static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
 
 #endif
 
+static void compliance_mode_recovery(unsigned long arg)
+{
+       struct xhci_hcd *xhci;
+       struct usb_hcd *hcd;
+       u32 temp;
+       int i;
+
+       xhci = (struct xhci_hcd *)arg;
+
+       for (i = 0; i < xhci->num_usb3_ports; i++) {
+               temp = xhci_readl(xhci, xhci->usb3_ports[i]);
+               if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) {
+                       /*
+                        * Compliance Mode Detected. Letting USB Core
+                        * handle the Warm Reset
+                        */
+                       xhci_dbg(xhci, "Compliance Mode Detected->Port %d!\n",
+                                       i + 1);
+                       xhci_dbg(xhci, "Attempting Recovery routine!\n");
+                       hcd = xhci->shared_hcd;
+
+                       if (hcd->state == HC_STATE_SUSPENDED)
+                               usb_hcd_resume_root_hub(hcd);
+
+                       usb_hcd_poll_rh_status(hcd);
+               }
+       }
+
+       if (xhci->port_status_u0 != ((1 << xhci->num_usb3_ports)-1))
+               mod_timer(&xhci->comp_mode_recovery_timer,
+                       jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
+}
+
+/*
+ * Quirk to work around an issue caused by the SN65LVPE502CP USB3.0 re-driver,
+ * which sometimes makes ports behind that hardware enter compliance mode.
+ * The quirk creates a timer that polls the link state of each host
+ * controller port every 2 seconds and recovers a port by issuing a Warm
+ * Reset if Compliance mode is detected; otherwise the port becomes "dead"
+ * (no device connections or disconnections are detected anymore). Because no
+ * status event is generated when entering compliance mode (per xhci spec),
+ * this quirk is needed on systems that have the failing hardware installed.
+ */
+static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
+{
+       xhci->port_status_u0 = 0;
+       init_timer(&xhci->comp_mode_recovery_timer);
+
+       xhci->comp_mode_recovery_timer.data = (unsigned long) xhci;
+       xhci->comp_mode_recovery_timer.function = compliance_mode_recovery;
+       xhci->comp_mode_recovery_timer.expires = jiffies +
+                       msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);
+
+       set_timer_slack(&xhci->comp_mode_recovery_timer,
+                       msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
+       add_timer(&xhci->comp_mode_recovery_timer);
+       xhci_dbg(xhci, "Compliance Mode Recovery Timer Initialized.\n");
+}
+
+/*
+ * This function identifies the systems that have installed the SN65LVPE502CP
+ * USB3.0 re-driver and that need the Compliance Mode Quirk.
+ * Systems:
+ * Vendor: Hewlett-Packard -> System Models: Z420, Z620 and Z820
+ */
+static bool compliance_mode_recovery_timer_quirk_check(void)
+{
+       const char *dmi_product_name, *dmi_sys_vendor;
+
+       dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
+       dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
+
+       if (!(strstr(dmi_sys_vendor, "Hewlett-Packard")))
+               return false;
+
+       if (strstr(dmi_product_name, "Z420") ||
+                       strstr(dmi_product_name, "Z620") ||
+                       strstr(dmi_product_name, "Z820"))
+               return true;
+
+       return false;
+}
+
+static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
+{
+       return (xhci->port_status_u0 == ((1 << xhci->num_usb3_ports)-1));
+}
+
+
 /*
  * Initialize memory for HCD and xHC (one-time init).
  *
@@ -421,6 +511,12 @@ int xhci_init(struct usb_hcd *hcd)
        retval = xhci_mem_init(xhci, GFP_KERNEL);
        xhci_dbg(xhci, "Finished xhci_init\n");
 
+       /* Initializing Compliance Mode Recovery Data If Needed */
+       if (compliance_mode_recovery_timer_quirk_check()) {
+               xhci->quirks |= XHCI_COMP_MODE_QUIRK;
+               compliance_mode_recovery_timer_init(xhci);
+       }
+
        return retval;
 }
 
@@ -629,6 +725,11 @@ void xhci_stop(struct usb_hcd *hcd)
        del_timer_sync(&xhci->event_ring_timer);
 #endif
 
+       /* Deleting Compliance Mode Recovery Timer */
+       if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
+                       (!(xhci_all_ports_seen_u0(xhci))))
+               del_timer_sync(&xhci->comp_mode_recovery_timer);
+
        if (xhci->quirks & XHCI_AMD_PLL_FIX)
                usb_amd_dev_put();
 
@@ -659,7 +760,7 @@ void xhci_shutdown(struct usb_hcd *hcd)
 {
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
 
-       if (xhci->quirks && XHCI_SPURIOUS_REBOOT)
+       if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
                usb_disable_xhci_ports(to_pci_dev(hcd->self.controller));
 
        spin_lock_irq(&xhci->lock);
@@ -806,6 +907,16 @@ int xhci_suspend(struct xhci_hcd *xhci)
        }
        spin_unlock_irq(&xhci->lock);
 
+       /*
+        * Deleting Compliance Mode Recovery Timer because the xHCI Host
+        * is about to be suspended.
+        */
+       if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
+                       (!(xhci_all_ports_seen_u0(xhci)))) {
+               del_timer_sync(&xhci->comp_mode_recovery_timer);
+               xhci_dbg(xhci, "Compliance Mode Recovery Timer Deleted!\n");
+       }
+
        /* step 5: remove core well power */
        /* synchronize irq when using MSI-X */
        xhci_msix_sync_irqs(xhci);
@@ -938,6 +1049,16 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
                usb_hcd_resume_root_hub(hcd);
                usb_hcd_resume_root_hub(xhci->shared_hcd);
        }
+
+       /*
+        * If the system is subject to the quirk, the Compliance Mode Timer
+        * needs to be re-initialized after every system resume: the ports
+        * can hit the Compliance Mode issue again, regardless of whether
+        * they had already reached U0 before the system was suspended.
+        */
+       if (xhci->quirks & XHCI_COMP_MODE_QUIRK)
+               compliance_mode_recovery_timer_init(xhci);
+
        return retval;
 }
 #endif /* CONFIG_PM */
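
One small hunk in this file also fixes a classic operator slip: "xhci->quirks && XHCI_SPURIOUS_REBOOT" is a logical AND that is true whenever any quirk bit is set at all, while the intended test is the bitwise mask "xhci->quirks & XHCI_SPURIOUS_REBOOT". A tiny standalone example of the difference:

#include <stdio.h>

#define QUIRK_A (1 << 0)
#define QUIRK_B (1 << 13)

int main(void)
{
        unsigned int quirks = QUIRK_A;  /* only bit 0 is set */

        /* Logical AND: non-zero && non-zero is always 1 here -- wrong test. */
        printf("&& test: %d\n", quirks && QUIRK_B);     /* prints 1 */

        /* Bitwise AND: checks whether QUIRK_B itself is set -- right test. */
        printf("&  test: %d\n", !!(quirks & QUIRK_B));  /* prints 0 */

        return 0;
}
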
index c713256297acd073e7589f2fdfd936390fb8bbd0..1a05908c66737ce94a77f880d1bc14cb7e40134a 100644 (file)
@@ -1495,6 +1495,7 @@ struct xhci_hcd {
 #define XHCI_LPM_SUPPORT       (1 << 11)
 #define XHCI_INTEL_HOST                (1 << 12)
 #define XHCI_SPURIOUS_REBOOT   (1 << 13)
+#define XHCI_COMP_MODE_QUIRK   (1 << 14)
        unsigned int            num_active_eps;
        unsigned int            limit_active_eps;
        /* There are two roothubs to keep track of bus suspend info for */
@@ -1511,6 +1512,11 @@ struct xhci_hcd {
        unsigned                sw_lpm_support:1;
        /* support xHCI 1.0 spec USB2 hardware LPM */
        unsigned                hw_lpm_support:1;
+       /* Compliance Mode Recovery Data */
+       struct timer_list       comp_mode_recovery_timer;
+       u32                     port_status_u0;
+/* Compliance Mode Timer Triggered every 2 seconds */
+#define COMP_MODE_RCVRY_MSECS 2000
 };
 
 /* convert between an HCD pointer and the corresponding EHCI_HCD */
index 4bb717d0bd41b12a8b0f6fd879c987286a8059b5..1ae378d5fc6f25b5861eb2e3ef79b32a76a4885f 100644 (file)
@@ -2049,7 +2049,7 @@ static int musb_urb_enqueue(
         * we only have work to do in the former case.
         */
        spin_lock_irqsave(&musb->lock, flags);
-       if (hep->hcpriv) {
+       if (hep->hcpriv || !next_urb(qh)) {
                /* some concurrent activity submitted another urb to hep...
                 * odd, rare, error prone, but legal.
                 */
index 57a608584e160248708aca39570a7e30bf55d7a5..c1be687e00ec722524f8807ff8d628ec7a5d0798 100644 (file)
@@ -388,7 +388,7 @@ dma_controller_create(struct musb *musb, void __iomem *base)
        struct platform_device *pdev = to_platform_device(dev);
        int irq = platform_get_irq_byname(pdev, "dma");
 
-       if (irq == 0) {
+       if (irq <= 0) {
                dev_err(dev, "No DMA interrupt line!\n");
                return NULL;
        }
index 1a1bd9cf40c5ce7c1d6f7ef1ac46360d0b6884a4..341625442377ec6f1daac69450269c95f4148eb5 100644 (file)
@@ -1215,7 +1215,7 @@ static int __devinit tusb_probe(struct platform_device *pdev)
        ret = platform_device_add(musb);
        if (ret) {
                dev_err(&pdev->dev, "failed to register musb device\n");
-               goto err1;
+               goto err2;
        }
 
        return 0;
index ecd173032fd480cc5954c48f03aa61e7b5c439e8..143c4e9e1be45cc24c0a612d38e590a5ad136be9 100644 (file)
@@ -818,7 +818,7 @@ static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
            usbhs_pipe_is_dcp(pipe))
                goto usbhsf_pio_prepare_push;
 
-       if (len % 4) /* 32bit alignment */
+       if (len & 0x7) /* 8byte alignment */
                goto usbhsf_pio_prepare_push;
 
        if ((uintptr_t)(pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */
@@ -905,7 +905,7 @@ static int usbhsf_dma_try_pop(struct usbhs_pkt *pkt, int *is_done)
        /* use PIO if packet is less than pio_dma_border */
        len = usbhsf_fifo_rcv_len(priv, fifo);
        len = min(pkt->length - pkt->actual, len);
-       if (len % 4) /* 32bit alignment */
+       if (len & 0x7) /* 8byte alignment */
                goto usbhsf_pio_prepare_pop_unselect;
 
        if (len < usbhs_get_dparam(priv, pio_dma_border))
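
Both hunks above tighten the DMA eligibility check from 4-byte to 8-byte alignment; for any power-of-two boundary N, "len & (N - 1)" is non-zero exactly when len is not a multiple of N, which is why the test changed from "% 4" to "& 0x7". A tiny sketch of the check:

#include <stdbool.h>
#include <stddef.h>

/* True if len is a multiple of the power-of-two boundary `align`. */
static bool is_len_aligned(size_t len, size_t align)
{
        return (len & (align - 1)) == 0;
}

/* e.g. is_len_aligned(24, 8) -> true, is_len_aligned(20, 8) -> false */
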
index 5620db6469e586f85ea8eacfa79798b1b4a676d8..f906b3aec2179ceb708c9cce772cd85a4e1a1724 100644 (file)
@@ -704,6 +704,7 @@ static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(FTDI_VID, FTDI_PCDJ_DAC2_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_RRCIRKITS_LOCOBUFFER_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_ASK_RDR400_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_NZR_SEM_USB_PID) },
        { USB_DEVICE(ICOM_VID, ICOM_ID_1_PID) },
        { USB_DEVICE(ICOM_VID, ICOM_OPC_U_UC_PID) },
        { USB_DEVICE(ICOM_VID, ICOM_ID_RP2C1_PID) },
@@ -804,13 +805,32 @@ static struct usb_device_id id_table_combined [] = {
                .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
        { USB_DEVICE(ADI_VID, ADI_GNICEPLUS_PID),
                .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
-       { USB_DEVICE(MICROCHIP_VID, MICROCHIP_USB_BOARD_PID) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MICROCHIP_VID, MICROCHIP_USB_BOARD_PID,
+                                       USB_CLASS_VENDOR_SPEC,
+                                       USB_SUBCLASS_VENDOR_SPEC, 0x00) },
        { USB_DEVICE(JETI_VID, JETI_SPC1201_PID) },
        { USB_DEVICE(MARVELL_VID, MARVELL_SHEEVAPLUG_PID),
                .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
        { USB_DEVICE(LARSENBRUSGAARD_VID, LB_ALTITRACK_PID) },
        { USB_DEVICE(GN_OTOMETRICS_VID, AURICAL_USB_PID) },
+       { USB_DEVICE(FTDI_VID, PI_C865_PID) },
+       { USB_DEVICE(FTDI_VID, PI_C857_PID) },
+       { USB_DEVICE(PI_VID, PI_C866_PID) },
+       { USB_DEVICE(PI_VID, PI_C663_PID) },
+       { USB_DEVICE(PI_VID, PI_C725_PID) },
+       { USB_DEVICE(PI_VID, PI_E517_PID) },
+       { USB_DEVICE(PI_VID, PI_C863_PID) },
        { USB_DEVICE(PI_VID, PI_E861_PID) },
+       { USB_DEVICE(PI_VID, PI_C867_PID) },
+       { USB_DEVICE(PI_VID, PI_E609_PID) },
+       { USB_DEVICE(PI_VID, PI_E709_PID) },
+       { USB_DEVICE(PI_VID, PI_100F_PID) },
+       { USB_DEVICE(PI_VID, PI_1011_PID) },
+       { USB_DEVICE(PI_VID, PI_1012_PID) },
+       { USB_DEVICE(PI_VID, PI_1013_PID) },
+       { USB_DEVICE(PI_VID, PI_1014_PID) },
+       { USB_DEVICE(PI_VID, PI_1015_PID) },
+       { USB_DEVICE(PI_VID, PI_1016_PID) },
        { USB_DEVICE(KONDO_VID, KONDO_USB_SERIAL_PID) },
        { USB_DEVICE(BAYER_VID, BAYER_CONTOUR_CABLE_PID) },
        { USB_DEVICE(FTDI_VID, MARVELL_OPENRD_PID),
index 5dd96ca6c380a0971921d198e12399e1902663db..41fe5826100c0ad27a62d21d16e8d5845dfa25cc 100644 (file)
@@ -75,6 +75,9 @@
 #define FTDI_OPENDCC_GATEWAY_PID       0xBFDB
 #define FTDI_OPENDCC_GBM_PID   0xBFDC
 
+/* NZR SEM 16+ USB (http://www.nzr.de) */
+#define FTDI_NZR_SEM_USB_PID   0xC1E0  /* NZR SEM-LOG16+ */
+
 /*
  * RR-CirKits LocoBuffer USB (http://www.rr-cirkits.com)
  */
 /*
  * Microchip Technology, Inc.
  *
- * MICROCHIP_VID (0x04D8) and MICROCHIP_USB_BOARD_PID (0x000A) are also used by:
+ * MICROCHIP_VID (0x04D8) and MICROCHIP_USB_BOARD_PID (0x000A) are
+ * used by single function CDC ACM class based firmware demo
+ * applications.  The VID/PID has also been used in firmware
+ * emulating FTDI serial chips by:
  * Hornby Elite - Digital Command Control Console
  * http://www.hornby.com/hornby-dcc/controllers/
  */
  * Physik Instrumente
  * http://www.physikinstrumente.com/en/products/
  */
+/* These two devices use the VID of FTDI */
+#define PI_C865_PID    0xe0a0  /* PI C-865 Piezomotor Controller */
+#define PI_C857_PID    0xe0a1  /* PI Encoder Trigger Box */
+
 #define PI_VID              0x1a72  /* Vendor ID */
-#define PI_E861_PID         0x1008  /* E-861 piezo controller USB connection */
+#define PI_C866_PID    0x1000  /* PI C-866 Piezomotor Controller */
+#define PI_C663_PID    0x1001  /* PI C-663 Mercury-Step */
+#define PI_C725_PID    0x1002  /* PI C-725 Piezomotor Controller */
+#define PI_E517_PID    0x1005  /* PI E-517 Digital Piezo Controller Operation Module */
+#define PI_C863_PID    0x1007  /* PI C-863 */
+#define PI_E861_PID    0x1008  /* PI E-861 Piezomotor Controller */
+#define PI_C867_PID    0x1009  /* PI C-867 Piezomotor Controller */
+#define PI_E609_PID    0x100D  /* PI E-609 Digital Piezo Controller */
+#define PI_E709_PID    0x100E  /* PI E-709 Digital Piezo Controller */
+#define PI_100F_PID    0x100F  /* PI Digital Piezo Controller */
+#define PI_1011_PID    0x1011  /* PI Digital Piezo Controller */
+#define PI_1012_PID    0x1012  /* PI Motion Controller */
+#define PI_1013_PID    0x1013  /* PI Motion Controller */
+#define PI_1014_PID    0x1014  /* PI Device */
+#define PI_1015_PID    0x1015  /* PI Device */
+#define PI_1016_PID    0x1016  /* PI Digital Servo Module */
 
 /*
  * Kondo Kagaku Co.Ltd.
index cc40f47ecea13ff2041389d5aa7d5526159e58d8..5ce88d1bc6f1e3ac15ac91290ad8cb2fd5935361 100644 (file)
@@ -886,8 +886,6 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff),
          .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1018, 0xff, 0xff, 0xff),
-         .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1057, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1058, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1059, 0xff, 0xff, 0xff) },
@@ -1092,6 +1090,10 @@ static const struct usb_device_id option_ids[] = {
         .driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff),
         .driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist },
+       { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) },
+       { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) },
+       { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) },
+
        { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) },
        { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) },
        { USB_DEVICE(ALINK_VENDOR_ID, DLINK_PRODUCT_DWM_652_U5) }, /* Yes, ALINK_VENDOR_ID */
index 77da6a2f43dc986a5b6eb17d065b9f53b80dce69..c03ecdd31e4c6133f31a54c143038682c694dc87 100644 (file)
@@ -987,7 +987,6 @@ err_regfb:
        fb_dealloc_cmap(&info->cmap);
 err_cmap:
        fb_deferred_io_cleanup(info);
-       kfree(info->fbdefio);
 err_defio:
        vfree((void *)info->screen_base);
 err_irq:
@@ -1022,7 +1021,6 @@ int  __devexit auok190x_common_remove(struct platform_device *pdev)
        fb_dealloc_cmap(&info->cmap);
 
        fb_deferred_io_cleanup(info);
-       kfree(info->fbdefio);
 
        vfree((void *)info->screen_base);
 
index 28b1a834906b1f59d31c49421e771aa5e6f8aae7..61b182bf32a22962492cc79a3b1e60f3f49ef388 100644 (file)
@@ -162,7 +162,7 @@ static void bit_putcs(struct vc_data *vc, struct fb_info *info,
        image.depth = 1;
 
        if (attribute) {
-               buf = kmalloc(cellsize, GFP_KERNEL);
+               buf = kmalloc(cellsize, GFP_ATOMIC);
                if (!buf)
                        return;
        }
index 88e92041d8f02d6136458535c61c3090c304e576..fdefa8fd72c4da070787d95260b2744d6a9a7dab 100644 (file)
@@ -449,7 +449,7 @@ static int __init fb_console_setup(char *this_opt)
 
        while ((options = strsep(&this_opt, ",")) != NULL) {
                if (!strncmp(options, "font:", 5))
-                       strcpy(fontname, options + 5);
+                       strlcpy(fontname, options + 5, sizeof(fontname));
                
                if (!strncmp(options, "scrollback:", 11)) {
                        options += 11;
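
Switching strcpy() to strlcpy() bounds the copy to the destination buffer, so an over-long "font:" option on the kernel command line can no longer run past the end of fontname[]. A reduced sketch of the bounded-copy pattern (the buffer size is a made-up example):

#include <linux/string.h>

static char fontname[40];       /* hypothetical size, for illustration */

static void set_fontname(const char *opt)
{
        /* Copies at most sizeof(fontname) - 1 bytes and always NUL-terminates. */
        strlcpy(fontname, opt, sizeof(fontname));
}
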
index 00ce1f34b4965aa1d20f9bc33c709046551f2eca..57d940be5f3d7eaa25b2358c83e169947a779bdf 100644 (file)
@@ -328,6 +328,8 @@ static int mb862xxfb_ioctl(struct fb_info *fbi, unsigned int cmd,
        case MB862XX_L1_SET_CFG:
                if (copy_from_user(l1_cfg, argp, sizeof(*l1_cfg)))
                        return -EFAULT;
+               if (l1_cfg->dh == 0 || l1_cfg->dw == 0)
+                       return -EINVAL;
                if ((l1_cfg->sw >= l1_cfg->dw) && (l1_cfg->sh >= l1_cfg->dh)) {
                        /* downscaling */
                        outreg(cap, GC_CAP_CSC,
index 5d31699fbd3caf4c885840b77c76fd4512d52ed9..f43bfe17b3b699289152cb9ff195afcb69f137f2 100644 (file)
@@ -105,6 +105,20 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
 
        sdi_config_lcd_manager(dssdev);
 
+       /*
+        * LCLK and PCLK divisors are located in shadow registers, and we
+        * normally write them to DISPC registers when enabling the output.
+        * However, SDI uses pck-free as source clock for its PLL, and pck-free
+        * is affected by the divisors. And as we need the PLL before enabling
+        * the output, we need to write the divisors early.
+        *
+        * It seems just writing to the DISPC register is enough, and we don't
+        * need to care about the shadow register mechanism for pck-free. The
+        * exact reason for this is unknown.
+        */
+       dispc_mgr_set_clock_div(dssdev->manager->id,
+                       &sdi.mgr_config.clock_info);
+
        dss_sdi_init(dssdev->phy.sdi.datapairs);
        r = dss_sdi_enable();
        if (r)
index 08ec1a7103f2b728420f9e534a2002b20f457d45..fc671d3d8004899ba36064954fc1343ddefb85e2 100644 (file)
@@ -1192,7 +1192,7 @@ static int _setcolreg(struct fb_info *fbi, u_int regno, u_int red, u_int green,
                        break;
 
                if (regno < 16) {
-                       u16 pal;
+                       u32 pal;
                        pal = ((red >> (16 - var->red.length)) <<
                                        var->red.offset) |
                                ((green >> (16 - var->green.length)) <<
index 1afb4fba11b430b5b7cf0f0b16cd2afa8fb61f08..4d519488d3045355672b8f48e3c1e2eb4ac64864 100644 (file)
@@ -232,7 +232,7 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                return ret;
 
        if (hwdev && hwdev->coherent_dma_mask)
-               dma_mask = hwdev->coherent_dma_mask;
+               dma_mask = dma_alloc_coherent_mask(hwdev, flags);
 
        phys = virt_to_phys(ret);
        dev_addr = xen_phys_to_bus(phys);
index b590ee067fcd3d3755efc35499f7bdee4151f41d..316df65163cfa5d6485a256edf2cdab24e8805d6 100644 (file)
@@ -98,7 +98,6 @@ static int push_cxx_to_hypervisor(struct acpi_processor *_pr)
 
                dst_cx->type = cx->type;
                dst_cx->latency = cx->latency;
-               dst_cx->power = cx->power;
 
                dst_cx->dpcnt = 0;
                set_xen_guest_handle(dst_cx->dp, NULL);
index 097e536e8672d68ff32da41406156a75ed05a996..03342728bf2352ff751f21c333b58bd5cb8756b6 100644 (file)
@@ -353,16 +353,16 @@ static int __devinit pcistub_init_device(struct pci_dev *dev)
        if (err)
                goto config_release;
 
-       dev_dbg(&dev->dev, "reseting (FLR, D3, etc) the device\n");
-       __pci_reset_function_locked(dev);
-
        /* We need the device active to save the state. */
        dev_dbg(&dev->dev, "save state of device\n");
        pci_save_state(dev);
        dev_data->pci_saved_state = pci_store_saved_state(dev);
        if (!dev_data->pci_saved_state)
                dev_err(&dev->dev, "Could not store PCI conf saved state!\n");
-
+       else {
+               dev_dbg(&dev->dev, "resetting (FLR, D3, etc) the device\n");
+               __pci_reset_function_locked(dev);
+       }
        /* Now disable the device (this also ensures some private device
         * data is setup before we export)
         */
index 38b42e7bc91d0e258a7fd086d793aae3d43ac4c1..b65015581744a6eefb9b3ed720478673b888d336 100644 (file)
@@ -1371,10 +1371,8 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
 
        if (srcid) {
                srcgroup = find_qgroup_rb(fs_info, srcid);
-               if (!srcgroup) {
-                       ret = -EINVAL;
+               if (!srcgroup)
                        goto unlock;
-               }
                dstgroup->rfer = srcgroup->rfer - level_size;
                dstgroup->rfer_cmpr = srcgroup->rfer_cmpr - level_size;
                srcgroup->excl = level_size;
@@ -1383,10 +1381,8 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
                qgroup_dirty(fs_info, srcgroup);
        }
 
-       if (!inherit) {
-               ret = -EINVAL;
+       if (!inherit)
                goto unlock;
-       }
 
        i_qgroups = (u64 *)(inherit + 1);
        for (i = 0; i < inherit->num_qgroups; ++i) {
index 074923ce593d7d53da6ae010128dffc8c080815b..f0cf934ba877d8549d8631787a4aea8da871b371 100644 (file)
@@ -1576,9 +1576,14 @@ cifs_readv_callback(struct mid_q_entry *mid)
                /* result already set, check signature */
                if (server->sec_mode &
                    (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
-                       if (cifs_verify_signature(rdata->iov, rdata->nr_iov,
-                                         server, mid->sequence_number + 1))
-                               cERROR(1, "Unexpected SMB signature");
+                       int rc = 0;
+
+                       rc = cifs_verify_signature(rdata->iov, rdata->nr_iov,
+                                                  server,
+                                                  mid->sequence_number + 1);
+                       if (rc)
+                               cERROR(1, "SMB signature verification returned "
+                                      "error = %d", rc);
                }
                /* FIXME: should this be counted toward the initiating task? */
                task_io_account_read(rdata->bytes);
index cbe709ad6663485cdcec49dbce9d1156de510c5d..781025be48bc47581c7484e8d55948914d588848 100644 (file)
@@ -356,19 +356,12 @@ cifs_create_get_file_info:
 cifs_create_set_dentry:
        if (rc != 0) {
                cFYI(1, "Create worked, get_inode_info failed rc = %d", rc);
+               CIFSSMBClose(xid, tcon, *fileHandle);
                goto out;
        }
        d_drop(direntry);
        d_add(direntry, newinode);
 
-       /* ENOENT for create?  How weird... */
-       rc = -ENOENT;
-       if (!newinode) {
-               CIFSSMBClose(xid, tcon, *fileHandle);
-               goto out;
-       }
-       rc = 0;
-
 out:
        kfree(buf);
        kfree(full_path);
index 9154192b0683e368a521ddf118961e1cdd592355..71e9ad9f59610aedef820784e558e1652973a553 100644 (file)
@@ -917,7 +917,7 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
        if (!buf) {
                mutex_unlock(&cinode->lock_mutex);
                free_xid(xid);
-               return rc;
+               return -ENOMEM;
        }
 
        for (i = 0; i < 2; i++) {
index 7354877fa3bd825519ea981b1c6208fd886dff11..cb79c7edecb0f3b2856ce8817c3515e8ad827e59 100644 (file)
@@ -124,10 +124,10 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
 {
        struct cifsInodeInfo *cifs_i = CIFS_I(inode);
        struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
-       unsigned long oldtime = cifs_i->time;
 
        cifs_revalidate_cache(inode, fattr);
 
+       spin_lock(&inode->i_lock);
        inode->i_atime = fattr->cf_atime;
        inode->i_mtime = fattr->cf_mtime;
        inode->i_ctime = fattr->cf_ctime;
@@ -148,9 +148,6 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
        else
                cifs_i->time = jiffies;
 
-       cFYI(1, "inode 0x%p old_time=%ld new_time=%ld", inode,
-                oldtime, cifs_i->time);
-
        cifs_i->delete_pending = fattr->cf_flags & CIFS_FATTR_DELETE_PENDING;
 
        cifs_i->server_eof = fattr->cf_eof;
@@ -158,7 +155,6 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
         * Can't safely change the file size here if the client is writing to
         * it due to potential races.
         */
-       spin_lock(&inode->i_lock);
        if (is_size_safe_to_change(cifs_i, fattr->cf_eof)) {
                i_size_write(inode, fattr->cf_eof);
 
@@ -859,12 +855,14 @@ struct inode *cifs_root_iget(struct super_block *sb)
 
        if (rc && tcon->ipc) {
                cFYI(1, "ipc connection - fake read inode");
+               spin_lock(&inode->i_lock);
                inode->i_mode |= S_IFDIR;
                set_nlink(inode, 2);
                inode->i_op = &cifs_ipc_inode_ops;
                inode->i_fop = &simple_dir_operations;
                inode->i_uid = cifs_sb->mnt_uid;
                inode->i_gid = cifs_sb->mnt_gid;
+               spin_unlock(&inode->i_lock);
        } else if (rc) {
                iget_failed(inode);
                inode = ERR_PTR(rc);
@@ -1110,6 +1108,15 @@ undo_setattr:
        goto out_close;
 }
 
+/* copied from fs/nfs/dir.c with small changes */
+static void
+cifs_drop_nlink(struct inode *inode)
+{
+       spin_lock(&inode->i_lock);
+       if (inode->i_nlink > 0)
+               drop_nlink(inode);
+       spin_unlock(&inode->i_lock);
+}
 
 /*
  * If dentry->d_inode is null (usually meaning the cached dentry
@@ -1166,13 +1173,13 @@ retry_std_delete:
 psx_del_no_retry:
        if (!rc) {
                if (inode)
-                       drop_nlink(inode);
+                       cifs_drop_nlink(inode);
        } else if (rc == -ENOENT) {
                d_drop(dentry);
        } else if (rc == -ETXTBSY) {
                rc = cifs_rename_pending_delete(full_path, dentry, xid);
                if (rc == 0)
-                       drop_nlink(inode);
+                       cifs_drop_nlink(inode);
        } else if ((rc == -EACCES) && (dosattr == 0) && inode) {
                attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
                if (attrs == NULL) {
@@ -1241,9 +1248,10 @@ cifs_mkdir_qinfo(struct inode *inode, struct dentry *dentry, umode_t mode,
         * setting nlink not necessary except in cases where we failed to get it
         * from the server or was set bogus
         */
+       spin_lock(&dentry->d_inode->i_lock);
        if ((dentry->d_inode) && (dentry->d_inode->i_nlink < 2))
                set_nlink(dentry->d_inode, 2);
-
+       spin_unlock(&dentry->d_inode->i_lock);
        mode &= ~current_umask();
        /* must turn on setgid bit if parent dir has it */
        if (inode->i_mode & S_ISGID)
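
The new cifs_drop_nlink() helper above (adapted, per its comment, from fs/nfs/dir.c) wraps the link-count change in the inode's i_lock so concurrent updates to i_nlink are serialized, and the same spin_lock(&inode->i_lock) treatment is applied around the set_nlink()/inc_nlink() call sites in this file. A reduced sketch of the pattern:

#include <linux/fs.h>
#include <linux/spinlock.h>

/* Drop one link under the inode spinlock, never going below zero (sketch). */
static void example_drop_nlink(struct inode *inode)
{
        spin_lock(&inode->i_lock);
        if (inode->i_nlink > 0)
                drop_nlink(inode);
        spin_unlock(&inode->i_lock);
}
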
index 09e4b3ae45640e3d0c1bd8757cc6da935fca65f5..e6ce3b1128756be4496494b2623fab27fa0d165f 100644 (file)
@@ -433,7 +433,9 @@ cifs_hardlink(struct dentry *old_file, struct inode *inode,
        if (old_file->d_inode) {
                cifsInode = CIFS_I(old_file->d_inode);
                if (rc == 0) {
+                       spin_lock(&old_file->d_inode->i_lock);
                        inc_nlink(old_file->d_inode);
+                       spin_unlock(&old_file->d_inode->i_lock);
 /* BB should we make this contingent on superblock flag NOATIME? */
 /*                     old_file->d_inode->i_ctime = CURRENT_TIME;*/
                        /* parent dir timestamps will update from srv
index a4ff5d547554d174466bc48eb7c8fccb48c8f668..e4d3b99641673670b681ca41e443d3a3083359c1 100644 (file)
@@ -52,7 +52,8 @@ check_smb2_hdr(struct smb2_hdr *hdr, __u64 mid)
                        cERROR(1, "Bad protocol string signature header %x",
                                  *(unsigned int *) hdr->ProtocolId);
                if (mid != hdr->MessageId)
-                       cERROR(1, "Mids do not match");
+                       cERROR(1, "Mids do not match: %llu and %llu", mid,
+                                 hdr->MessageId);
        }
        cERROR(1, "Bad SMB detected. The Mid=%llu", hdr->MessageId);
        return 1;
@@ -107,7 +108,7 @@ smb2_check_message(char *buf, unsigned int length)
         * ie Validate the wct via smb2_struct_sizes table above
         */
 
-       if (length < 2 + sizeof(struct smb2_hdr)) {
+       if (length < sizeof(struct smb2_pdu)) {
                if ((length >= sizeof(struct smb2_hdr)) && (hdr->Status != 0)) {
                        pdu->StructureSize2 = 0;
                        /*
@@ -121,15 +122,15 @@ smb2_check_message(char *buf, unsigned int length)
                return 1;
        }
        if (len > CIFSMaxBufSize + MAX_SMB2_HDR_SIZE - 4) {
-               cERROR(1, "SMB length greater than maximum, mid=%lld", mid);
+               cERROR(1, "SMB length greater than maximum, mid=%llu", mid);
                return 1;
        }
 
        if (check_smb2_hdr(hdr, mid))
                return 1;
 
-       if (hdr->StructureSize != SMB2_HEADER_SIZE) {
-               cERROR(1, "Illegal structure size %d",
+       if (hdr->StructureSize != SMB2_HEADER_STRUCTURE_SIZE) {
+               cERROR(1, "Illegal structure size %u",
                          le16_to_cpu(hdr->StructureSize));
                return 1;
        }
@@ -161,8 +162,9 @@ smb2_check_message(char *buf, unsigned int length)
        if (4 + len != clc_len) {
                cFYI(1, "Calculated size %u length %u mismatch mid %llu",
                        clc_len, 4 + len, mid);
-               if (clc_len == 4 + len + 1) /* BB FIXME (fix samba) */
-                       return 0; /* BB workaround Samba 3 bug SessSetup rsp */
+               /* server can return one byte more */
+               if (clc_len == 4 + len + 1)
+                       return 0;
                return 1;
        }
        return 0;
index f37a1b41b402b76e9f5ce7ae155e7bd5bb3a08a3..15dc8eea82731fb8c00485d67493ab30d3a39318 100644 (file)
 
 #define SMB2_PROTO_NUMBER __constant_cpu_to_le32(0x424d53fe)
 
-#define SMB2_HEADER_SIZE __constant_le16_to_cpu(64)
-
-#define SMB2_ERROR_STRUCTURE_SIZE2 __constant_le16_to_cpu(9)
-
 /*
  * SMB2 Header Definition
  *
@@ -99,6 +95,9 @@
  * "PDU" :  "Protocol Data Unit" (ie a network "frame")
  *
  */
+
+#define SMB2_HEADER_STRUCTURE_SIZE __constant_cpu_to_le16(64)
+
 struct smb2_hdr {
        __be32 smb2_buf_length; /* big endian on wire */
                                /* length is only two or three bytes - with
@@ -140,6 +139,9 @@ struct smb2_pdu {
  *  command code name for the struct. Note that structures must be packed.
  *
  */
+
+#define SMB2_ERROR_STRUCTURE_SIZE2 __constant_cpu_to_le16(9)
+
 struct smb2_err_rsp {
        struct smb2_hdr hdr;
        __le16 StructureSize;
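
The renamed constants above are now produced with __constant_cpu_to_le16() rather than __constant_le16_to_cpu(): a compile-time value that is compared against a little-endian on-wire field such as StructureSize should carry the __le16 type itself, so both sides of the comparison are in wire order and sparse's endianness checking stays meaningful. A minimal sketch of that comparison direction (generic names, not the SMB2 ones):

#include <linux/types.h>
#include <asm/byteorder.h>

#define WIRE_STRUCT_SIZE        cpu_to_le16(64) /* constant kept in wire (LE) order */

struct wire_hdr {
        __le16 structure_size;                  /* field as received from the network */
};

static bool wire_hdr_size_ok(const struct wire_hdr *hdr)
{
        /* Both operands are __le16, so the comparison needs no byte swapping. */
        return hdr->structure_size == WIRE_STRUCT_SIZE;
}
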
index 83867ef348dfe15276d83379f12ae5c46399f7a0..d9b639b95fa8b8a5167ecf58fa3285f90ff2520f 100644 (file)
@@ -503,13 +503,16 @@ cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
        /* convert the length into a more usable form */
        if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
                struct kvec iov;
+               int rc = 0;
 
                iov.iov_base = mid->resp_buf;
                iov.iov_len = len;
                /* FIXME: add code to kill session */
-               if (cifs_verify_signature(&iov, 1, server,
-                                         mid->sequence_number + 1) != 0)
-                       cERROR(1, "Unexpected SMB signature");
+               rc = cifs_verify_signature(&iov, 1, server,
+                                          mid->sequence_number + 1);
+               if (rc)
+                       cERROR(1, "SMB signature verification returned error = "
+                              "%d", rc);
        }
 
        /* BB special case reconnect tid and uid here? */
index 44ce5c6a541d65b7ceae1e7d67655a2e3f0f109a..d45ba4568128eb17baf60535d6dc00e663196afa 100644 (file)
@@ -275,8 +275,14 @@ out:
 
 static int ecryptfs_flush(struct file *file, fl_owner_t td)
 {
-       return file->f_mode & FMODE_WRITE
-              ? filemap_write_and_wait(file->f_mapping) : 0;
+       struct file *lower_file = ecryptfs_file_to_lower(file);
+
+       if (lower_file->f_op && lower_file->f_op->flush) {
+               filemap_write_and_wait(file->f_mapping);
+               return lower_file->f_op->flush(lower_file, td);
+       }
+
+       return 0;
 }
 
 static int ecryptfs_release(struct inode *inode, struct file *file)
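
The new ecryptfs_flush() above writes back its own dirty pages and then forwards the flush to the lower filesystem, the usual delegation shape for a stacking filesystem. A reduced sketch of that shape (the lower_file argument stands in for whatever accessor the stacking layer provides, ecryptfs_file_to_lower() in this driver):

#include <linux/fs.h>

static int stacked_flush(struct file *file, fl_owner_t td,
                         struct file *lower_file)
{
        /* Push this layer's dirty pages first... */
        filemap_write_and_wait(file->f_mapping);

        /* ...then let the lower filesystem run its own flush, if it has one. */
        if (lower_file->f_op && lower_file->f_op->flush)
                return lower_file->f_op->flush(lower_file, td);

        return 0;
}
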
index 534b129ea676500c4df149d95cb9bd5be517dc54..cc7709e7c508d81a1429ffa25bace9c7a101a832 100644 (file)
@@ -619,6 +619,7 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
        struct dentry *lower_old_dir_dentry;
        struct dentry *lower_new_dir_dentry;
        struct dentry *trap = NULL;
+       struct inode *target_inode;
 
        lower_old_dentry = ecryptfs_dentry_to_lower(old_dentry);
        lower_new_dentry = ecryptfs_dentry_to_lower(new_dentry);
@@ -626,6 +627,7 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
        dget(lower_new_dentry);
        lower_old_dir_dentry = dget_parent(lower_old_dentry);
        lower_new_dir_dentry = dget_parent(lower_new_dentry);
+       target_inode = new_dentry->d_inode;
        trap = lock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
        /* source should not be ancestor of target */
        if (trap == lower_old_dentry) {
@@ -641,6 +643,9 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
                        lower_new_dir_dentry->d_inode, lower_new_dentry);
        if (rc)
                goto out_lock;
+       if (target_inode)
+               fsstack_copy_attr_all(target_inode,
+                                     ecryptfs_inode_to_lower(target_inode));
        fsstack_copy_attr_all(new_dir, lower_new_dir_dentry->d_inode);
        if (new_dir != old_dir)
                fsstack_copy_attr_all(old_dir, lower_old_dir_dentry->d_inode);
index 2768138eefeef85707f9ee29652532b25d50dfcb..9b627c15010a3af35e1f2ec85ccafc2b18d97d44 100644 (file)
@@ -162,6 +162,7 @@ void ecryptfs_put_lower_file(struct inode *inode)
        inode_info = ecryptfs_inode_to_private(inode);
        if (atomic_dec_and_mutex_lock(&inode_info->lower_file_count,
                                      &inode_info->lower_file_mutex)) {
+               filemap_write_and_wait(inode->i_mapping);
                fput(inode_info->lower_file);
                inode_info->lower_file = NULL;
                mutex_unlock(&inode_info->lower_file_mutex);
index a07597307fd1cd221b20997d9e0268caa6bc9138..ff574b4e345efd09a7c2e6f2511213210fbcc92d 100644 (file)
@@ -3072,6 +3072,8 @@ static int ext3_do_update_inode(handle_t *handle,
        struct ext3_inode_info *ei = EXT3_I(inode);
        struct buffer_head *bh = iloc->bh;
        int err = 0, rc, block;
+       int need_datasync = 0;
+       __le32 disksize;
        uid_t i_uid;
        gid_t i_gid;
 
@@ -3113,7 +3115,11 @@ again:
                raw_inode->i_gid_high = 0;
        }
        raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
-       raw_inode->i_size = cpu_to_le32(ei->i_disksize);
+       disksize = cpu_to_le32(ei->i_disksize);
+       if (disksize != raw_inode->i_size) {
+               need_datasync = 1;
+               raw_inode->i_size = disksize;
+       }
        raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
        raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
        raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
@@ -3129,8 +3135,11 @@ again:
        if (!S_ISREG(inode->i_mode)) {
                raw_inode->i_dir_acl = cpu_to_le32(ei->i_dir_acl);
        } else {
-               raw_inode->i_size_high =
-                       cpu_to_le32(ei->i_disksize >> 32);
+               disksize = cpu_to_le32(ei->i_disksize >> 32);
+               if (disksize != raw_inode->i_size_high) {
+                       raw_inode->i_size_high = disksize;
+                       need_datasync = 1;
+               }
                if (ei->i_disksize > 0x7fffffffULL) {
                        struct super_block *sb = inode->i_sb;
                        if (!EXT3_HAS_RO_COMPAT_FEATURE(sb,
@@ -3183,6 +3192,8 @@ again:
        ext3_clear_inode_state(inode, EXT3_STATE_NEW);
 
        atomic_set(&ei->i_sync_tid, handle->h_transaction->t_tid);
+       if (need_datasync)
+               atomic_set(&ei->i_datasync_tid, handle->h_transaction->t_tid);
 out_brelse:
        brelse (bh);
        ext3_std_error(inode->i_sb, err);
index 03ff5b1eba93ec21e11d9ca31f9c8b3f22e65d36..75a20c092dd43b573c4f24788d067b3e6b1bfef0 100644 (file)
@@ -117,7 +117,7 @@ static ssize_t fuse_conn_max_background_write(struct file *file,
                                              const char __user *buf,
                                              size_t count, loff_t *ppos)
 {
-       unsigned val;
+       unsigned uninitialized_var(val);
        ssize_t ret;
 
        ret = fuse_conn_limit_write(file, buf, count, ppos, &val,
@@ -154,7 +154,7 @@ static ssize_t fuse_conn_congestion_threshold_write(struct file *file,
                                                    const char __user *buf,
                                                    size_t count, loff_t *ppos)
 {
-       unsigned val;
+       unsigned uninitialized_var(val);
        ssize_t ret;
 
        ret = fuse_conn_limit_write(file, buf, count, ppos, &val,
index 3426521f3205cce09a98b8a1bd01ffe9e44f84c0..ee8d55042298272f6ac6c76982f4ecd5efb745ca 100644 (file)
@@ -396,7 +396,7 @@ err_device:
 err_region:
        unregister_chrdev_region(devt, 1);
 err:
-       fc->conn_error = 1;
+       fuse_conn_kill(fc);
        goto out;
 }
 
@@ -532,8 +532,6 @@ static int cuse_channel_release(struct inode *inode, struct file *file)
                cdev_del(cc->cdev);
        }
 
-       /* kill connection and shutdown channel */
-       fuse_conn_kill(&cc->fc);
        rc = fuse_dev_release(inode, file);     /* puts the base reference */
 
        return rc;
index 7df2b5e8fbe187af6599504f935d1ed463a40c64..f4246cfc8d876db6ac39a6ef058b144c73d503af 100644 (file)
@@ -1576,6 +1576,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
                req->pages[req->num_pages] = page;
                req->num_pages++;
 
+               offset = 0;
                num -= this_num;
                total_len += this_num;
                index++;
index ce0a2838ccd097a5392d469fc0650d2e7b0d7e8d..fca222dabe3ccc4a791e894d325bdc4e4f78b7f3 100644 (file)
@@ -367,11 +367,6 @@ void fuse_conn_kill(struct fuse_conn *fc)
        wake_up_all(&fc->waitq);
        wake_up_all(&fc->blocked_waitq);
        wake_up_all(&fc->reserved_req_waitq);
-       mutex_lock(&fuse_mutex);
-       list_del(&fc->entry);
-       fuse_ctl_remove_conn(fc);
-       mutex_unlock(&fuse_mutex);
-       fuse_bdi_destroy(fc);
 }
 EXPORT_SYMBOL_GPL(fuse_conn_kill);
 
@@ -380,7 +375,14 @@ static void fuse_put_super(struct super_block *sb)
        struct fuse_conn *fc = get_fuse_conn_super(sb);
 
        fuse_send_destroy(fc);
+
        fuse_conn_kill(fc);
+       mutex_lock(&fuse_mutex);
+       list_del(&fc->entry);
+       fuse_ctl_remove_conn(fc);
+       mutex_unlock(&fuse_mutex);
+       fuse_bdi_destroy(fc);
+
        fuse_conn_put(fc);
 }
 
index d1d791ef38de2188852551254a63f974a605feff..382000ffac1f7e892163665a27982e587b9d83e7 100644 (file)
@@ -322,6 +322,29 @@ static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
        return -ENOTTY;
 }
 
+/**
+ * gfs2_size_hint - Give a hint to the size of a write request
+ * @file: The struct file
+ * @offset: The file offset of the write
+ * @size: The length of the write
+ *
+ * When we are about to do a write, this function records the total
+ * write size in order to provide a suitable hint to the lower layers
+ * about how many blocks will be required.
+ *
+ */
+
+static void gfs2_size_hint(struct file *filep, loff_t offset, size_t size)
+{
+       struct inode *inode = filep->f_dentry->d_inode;
+       struct gfs2_sbd *sdp = GFS2_SB(inode);
+       struct gfs2_inode *ip = GFS2_I(inode);
+       size_t blks = (size + sdp->sd_sb.sb_bsize - 1) >> sdp->sd_sb.sb_bsize_shift;
+       int hint = min_t(size_t, INT_MAX, blks);
+
+       atomic_set(&ip->i_res->rs_sizehint, hint);
+}
+
 /**
  * gfs2_allocate_page_backing - Use bmap to allocate blocks
  * @page: The (locked) page to allocate backing for
@@ -382,8 +405,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
        if (ret)
                return ret;
 
-       atomic_set(&ip->i_res->rs_sizehint,
-                  PAGE_CACHE_SIZE >> sdp->sd_sb.sb_bsize_shift);
+       gfs2_size_hint(vma->vm_file, pos, PAGE_CACHE_SIZE);
 
        gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
        ret = gfs2_glock_nq(&gh);
@@ -663,7 +685,8 @@ static ssize_t gfs2_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
        if (ret)
                return ret;
 
-       atomic_set(&ip->i_res->rs_sizehint, writesize >> sdp->sd_sb.sb_bsize_shift);
+       gfs2_size_hint(file, pos, writesize);
+
        if (file->f_flags & O_APPEND) {
                struct gfs2_holder gh;
 
@@ -789,7 +812,7 @@ static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
        if (unlikely(error))
                goto out_uninit;
 
-       atomic_set(&ip->i_res->rs_sizehint, len >> sdp->sd_sb.sb_bsize_shift);
+       gfs2_size_hint(file, offset, len);
 
        while (len > 0) {
                if (len < bytes)
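/*
 * Illustrative aside, not part of the patch above: the new
 * gfs2_size_hint() helper simply records ceil(size / block_size),
 * clamped to INT_MAX, as the per-inode reservation size hint.
 * Assuming a 4 KiB filesystem block (sb_bsize = 4096,
 * sb_bsize_shift = 12), a 10000-byte write works out as
 *
 *	blks = (10000 + 4096 - 1) >> 12 = 3;
 *	hint = min_t(size_t, INT_MAX, blks) = 3;
 *
 * so the block reservation code is told to expect roughly three new
 * blocks for this request, regardless of which caller (mkwrite,
 * aio_write or fallocate) supplied the hint.
 */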
index 4ce22e54730806e02ed0ba70f7e7fc847d513142..753af3d86bbcecaa76f5c01cb54d81b0006c4fac 100644 (file)
@@ -1722,7 +1722,9 @@ static int gfs2_setxattr(struct dentry *dentry, const char *name,
        gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
        ret = gfs2_glock_nq(&gh);
        if (ret == 0) {
-               ret = generic_setxattr(dentry, name, data, size, flags);
+               ret = gfs2_rs_alloc(ip);
+               if (ret == 0)
+                       ret = generic_setxattr(dentry, name, data, size, flags);
                gfs2_glock_dq(&gh);
        }
        gfs2_holder_uninit(&gh);
@@ -1757,7 +1759,9 @@ static int gfs2_removexattr(struct dentry *dentry, const char *name)
        gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
        ret = gfs2_glock_nq(&gh);
        if (ret == 0) {
-               ret = generic_removexattr(dentry, name);
+               ret = gfs2_rs_alloc(ip);
+               if (ret == 0)
+                       ret = generic_removexattr(dentry, name);
                gfs2_glock_dq(&gh);
        }
        gfs2_holder_uninit(&gh);
index 4d34887a601d966660549b0d0a27517353c1ed5e..c9ed814eeb6f9652eaa927e2ab13fc42c906da14 100644 (file)
@@ -1961,7 +1961,7 @@ static void gfs2_rgrp_error(struct gfs2_rgrpd *rgd)
  * @dinode: 1 if this block is a dinode block, otherwise data block
  * @nblocks: desired extent length
  *
- * Lay claim to previously allocated block reservation blocks.
+ * Lay claim to previously reserved blocks.
  * Returns: Starting block number of the blocks claimed.
  * Sets *nblocks to the actual extent length allocated.
  */
@@ -1970,19 +1970,17 @@ static u64 claim_reserved_blks(struct gfs2_inode *ip, bool dinode,
 {
        struct gfs2_blkreserv *rs = ip->i_res;
        struct gfs2_rgrpd *rgd = rs->rs_rgd;
-       struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_bitmap *bi;
        u64 start_block = gfs2_rs_startblk(rs);
        const unsigned int elen = *nblocks;
 
-       /*BUG_ON(!gfs2_glock_is_locked_by_me(ip->i_gl));*/
-       gfs2_assert_withdraw(sdp, rgd);
-       /*BUG_ON(!gfs2_glock_is_locked_by_me(rgd->rd_gl));*/
        bi = rs->rs_bi;
        gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1);
 
        for (*nblocks = 0; *nblocks < elen && rs->rs_free; (*nblocks)++) {
-               /* Make sure the bitmap hasn't changed */
+               if (gfs2_testbit(rgd, bi->bi_bh->b_data + bi->bi_offset,
+                                bi->bi_len, rs->rs_biblk) != GFS2_BLKST_FREE)
+                       break;
                gfs2_setbit(rgd, bi->bi_clone, bi, rs->rs_biblk,
                            dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
                rs->rs_biblk++;
@@ -1991,20 +1989,12 @@ static u64 claim_reserved_blks(struct gfs2_inode *ip, bool dinode,
                BUG_ON(!rgd->rd_reserved);
                rgd->rd_reserved--;
                dinode = false;
-               trace_gfs2_rs(ip, rs, TRACE_RS_CLAIM);
        }
 
-       if (!rs->rs_free) {
-               struct gfs2_rgrpd *rgd = ip->i_res->rs_rgd;
-
+       trace_gfs2_rs(ip, rs, TRACE_RS_CLAIM);
+       if (!rs->rs_free || *nblocks != elen)
                gfs2_rs_deltree(rs);
-               /* -nblocks because we haven't returned to do the math yet.
-                  I'm doing the math backwards to prevent negative numbers,
-                  but think of it as:
-                  if (unclaimed_blocks(rgd) - *nblocks >= RGRP_RSRV_MINBLKS */
-               if (unclaimed_blocks(rgd) >= RGRP_RSRV_MINBLKS + *nblocks)
-                       rg_mblk_search(rgd, ip);
-       }
+
        return start_block;
 }
 
@@ -2037,34 +2027,34 @@ int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
        if (ip->i_res->rs_requested == 0)
                return -ECANCELED;
 
-       /* Check if we have a multi-block reservation, and if so, claim the
-          next free block from it. */
+       /* If we have a reservation, claim blocks from it. */
        if (gfs2_rs_active(ip->i_res)) {
                BUG_ON(!ip->i_res->rs_free);
                rgd = ip->i_res->rs_rgd;
                block = claim_reserved_blks(ip, dinode, nblocks);
-       } else {
-               rgd = ip->i_rgd;
+               if (*nblocks)
+                       goto found_blocks;
+       }
 
-               if (!dinode && rgrp_contains_block(rgd, ip->i_goal))
-                       goal = ip->i_goal - rgd->rd_data0;
-               else
-                       goal = rgd->rd_last_alloc;
-
-               blk = rgblk_search(rgd, goal, GFS2_BLKST_FREE, &bi);
-
-               /* Since all blocks are reserved in advance, this shouldn't
-                  happen */
-               if (blk == BFITNOENT) {
-                       printk(KERN_WARNING "BFITNOENT, nblocks=%u\n",
-                              *nblocks);
-                       printk(KERN_WARNING "FULL=%d\n",
-                              test_bit(GBF_FULL, &rgd->rd_bits->bi_flags));
-                       goto rgrp_error;
-               }
+       rgd = ip->i_rgd;
 
-               block = gfs2_alloc_extent(rgd, bi, blk, dinode, nblocks);
+       if (!dinode && rgrp_contains_block(rgd, ip->i_goal))
+               goal = ip->i_goal - rgd->rd_data0;
+       else
+               goal = rgd->rd_last_alloc;
+
+       blk = rgblk_search(rgd, goal, GFS2_BLKST_FREE, &bi);
+
+       /* Since all blocks are reserved in advance, this shouldn't happen */
+       if (blk == BFITNOENT) {
+               printk(KERN_WARNING "BFITNOENT, nblocks=%u\n", *nblocks);
+               printk(KERN_WARNING "FULL=%d\n",
+                      test_bit(GBF_FULL, &rgd->rd_bits->bi_flags));
+               goto rgrp_error;
        }
+
+       block = gfs2_alloc_extent(rgd, bi, blk, dinode, nblocks);
+found_blocks:
        ndata = *nblocks;
        if (dinode)
                ndata--;
index 75d6d0a3d32e2685bbd43f791b1f32775c87ce59..6a7fcab7ecb3115c7630573c17f4d8285a418591 100644 (file)
@@ -287,10 +287,12 @@ nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
        struct inode *inode = file->f_path.dentry->d_inode;
 
        ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
+       if (ret != 0)
+               goto out;
        mutex_lock(&inode->i_mutex);
        ret = nfs_file_fsync_commit(file, start, end, datasync);
        mutex_unlock(&inode->i_mutex);
-
+out:
        return ret;
 }
 
index c6e895f0fbf36eee681a5cb5d8e4a92bfd8c0d35..9b47610338f59f03f6b4fdc0280d6aa61c266d4f 100644 (file)
@@ -154,7 +154,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
        nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
        nfsi->attrtimeo_timestamp = jiffies;
 
-       memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
+       memset(NFS_I(inode)->cookieverf, 0, sizeof(NFS_I(inode)->cookieverf));
        if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
                nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
        else
index d6b3b5f2d779acd1ce7e0324e8c52e388c7a273a..69322096c32569d4674517f7121e4fc272206ba8 100644 (file)
@@ -643,7 +643,7 @@ nfs3_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
                  u64 cookie, struct page **pages, unsigned int count, int plus)
 {
        struct inode            *dir = dentry->d_inode;
-       __be32                  *verf = NFS_COOKIEVERF(dir);
+       __be32                  *verf = NFS_I(dir)->cookieverf;
        struct nfs3_readdirargs arg = {
                .fh             = NFS_FH(dir),
                .cookie         = cookie,
index acb65e7887f8437b8aac391255f4be1218524a27..eb5eb8eef4d34db3c7bafe3c84c1db0bf43e8974 100644 (file)
@@ -96,13 +96,15 @@ nfs4_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
        struct inode *inode = file->f_path.dentry->d_inode;
 
        ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
+       if (ret != 0)
+               goto out;
        mutex_lock(&inode->i_mutex);
        ret = nfs_file_fsync_commit(file, start, end, datasync);
        if (!ret && !datasync)
                /* application has asked for meta-data sync */
                ret = pnfs_layoutcommit_inode(inode, true);
        mutex_unlock(&inode->i_mutex);
-
+out:
        return ret;
 }
 
index 635274140b180287668dbaa7540bd84852051181..1e50326d00ddd1f7ef8931470bc1cd0ad32b4015 100644 (file)
@@ -3215,11 +3215,11 @@ static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
                        dentry->d_parent->d_name.name,
                        dentry->d_name.name,
                        (unsigned long long)cookie);
-       nfs4_setup_readdir(cookie, NFS_COOKIEVERF(dir), dentry, &args);
+       nfs4_setup_readdir(cookie, NFS_I(dir)->cookieverf, dentry, &args);
        res.pgbase = args.pgbase;
        status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
        if (status >= 0) {
-               memcpy(NFS_COOKIEVERF(dir), res.verifier.data, NFS4_VERIFIER_SIZE);
+               memcpy(NFS_I(dir)->cookieverf, res.verifier.data, NFS4_VERIFIER_SIZE);
                status += args.pgbase;
        }
 
@@ -3653,11 +3653,11 @@ static inline int nfs4_server_supports_acls(struct nfs_server *server)
                && (server->acl_bitmask & ACL4_SUPPORT_DENY_ACL);
 }
 
-/* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_CACHE_SIZE, and that
- * it's OK to put sizeof(void) * (XATTR_SIZE_MAX/PAGE_CACHE_SIZE) bytes on
+/* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that
+ * it's OK to put sizeof(void) * (XATTR_SIZE_MAX/PAGE_SIZE) bytes on
  * the stack.
  */
-#define NFS4ACL_MAXPAGES (XATTR_SIZE_MAX >> PAGE_CACHE_SHIFT)
+#define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE)
 
 static int buf_to_pages_noslab(const void *buf, size_t buflen,
                struct page **pages, unsigned int *pgbase)
@@ -3668,7 +3668,7 @@ static int buf_to_pages_noslab(const void *buf, size_t buflen,
        spages = pages;
 
        do {
-               len = min_t(size_t, PAGE_CACHE_SIZE, buflen);
+               len = min_t(size_t, PAGE_SIZE, buflen);
                newpage = alloc_page(GFP_KERNEL);
 
                if (newpage == NULL)
@@ -3739,7 +3739,7 @@ static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size
        struct nfs4_cached_acl *acl;
        size_t buflen = sizeof(*acl) + acl_len;
 
-       if (pages && buflen <= PAGE_SIZE) {
+       if (buflen <= PAGE_SIZE) {
                acl = kmalloc(buflen, GFP_KERNEL);
                if (acl == NULL)
                        goto out;
@@ -3782,17 +3782,15 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
                .rpc_argp = &args,
                .rpc_resp = &res,
        };
-       int ret = -ENOMEM, npages, i;
-       size_t acl_len = 0;
+       unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
+       int ret = -ENOMEM, i;
 
-       npages = (buflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
        /* As long as we're doing a round trip to the server anyway,
         * let's be prepared for a page of acl data. */
        if (npages == 0)
                npages = 1;
-
-       /* Add an extra page to handle the bitmap returned */
-       npages++;
+       if (npages > ARRAY_SIZE(pages))
+               return -ERANGE;
 
        for (i = 0; i < npages; i++) {
                pages[i] = alloc_page(GFP_KERNEL);
@@ -3808,11 +3806,6 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
        args.acl_len = npages * PAGE_SIZE;
        args.acl_pgbase = 0;
 
-       /* Let decode_getfacl know not to fail if the ACL data is larger than
-        * the page we send as a guess */
-       if (buf == NULL)
-               res.acl_flags |= NFS4_ACL_LEN_REQUEST;
-
        dprintk("%s  buf %p buflen %zu npages %d args.acl_len %zu\n",
                __func__, buf, buflen, npages, args.acl_len);
        ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
@@ -3820,20 +3813,19 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
        if (ret)
                goto out_free;
 
-       acl_len = res.acl_len;
-       if (acl_len > args.acl_len)
-               nfs4_write_cached_acl(inode, NULL, 0, acl_len);
-       else
-               nfs4_write_cached_acl(inode, pages, res.acl_data_offset,
-                                     acl_len);
-       if (buf) {
+       /* Handle the case where the passed-in buffer is too short */
+       if (res.acl_flags & NFS4_ACL_TRUNC) {
+               /* Did the user only issue a request for the acl length? */
+               if (buf == NULL)
+                       goto out_ok;
                ret = -ERANGE;
-               if (acl_len > buflen)
-                       goto out_free;
-               _copy_from_pages(buf, pages, res.acl_data_offset,
-                               acl_len);
+               goto out_free;
        }
-       ret = acl_len;
+       nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len);
+       if (buf)
+               _copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len);
+out_ok:
+       ret = res.acl_len;
 out_free:
        for (i = 0; i < npages; i++)
                if (pages[i])
@@ -3891,10 +3883,13 @@ static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t bufl
                .rpc_argp       = &arg,
                .rpc_resp       = &res,
        };
+       unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
        int ret, i;
 
        if (!nfs4_server_supports_acls(server))
                return -EOPNOTSUPP;
+       if (npages > ARRAY_SIZE(pages))
+               return -ERANGE;
        i = buf_to_pages_noslab(buf, buflen, arg.acl_pages, &arg.acl_pgbase);
        if (i < 0)
                return i;
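/*
 * Illustrative aside, not part of the patch above: assuming the usual
 * XATTR_SIZE_MAX of 65536 and 4 KiB pages, the reworked bound
 * evaluates to
 *
 *	NFS4ACL_MAXPAGES = DIV_ROUND_UP(65536, 4096) = 16
 *
 * and both the getacl and setacl paths above now return -ERANGE when
 * a request would need more pages than fit in their fixed-size page
 * arrays (the ARRAY_SIZE(pages) checks in the hunks above).
 */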
index 1bfbd67c556d753a21f046c87edc3c9b07b0b8f0..8dba6bd485578695fb791f8aa548bc8ac99b4391 100644 (file)
@@ -5072,18 +5072,14 @@ static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
                 * are stored with the acl data to handle the problem of
                 * variable length bitmaps.*/
                res->acl_data_offset = xdr_stream_pos(xdr) - pg_offset;
-
-               /* We ignore &savep and don't do consistency checks on
-                * the attr length.  Let userspace figure it out.... */
                res->acl_len = attrlen;
-               if (attrlen > (xdr->nwords << 2)) {
-                       if (res->acl_flags & NFS4_ACL_LEN_REQUEST) {
-                               /* getxattr interface called with a NULL buf */
-                               goto out;
-                       }
+
+               /* Check for receive buffer overflow */
+               if (res->acl_len > (xdr->nwords << 2) ||
+                   res->acl_len + res->acl_data_offset > xdr->buf->page_len) {
+                       res->acl_flags |= NFS4_ACL_TRUNC;
                        dprintk("NFS: acl reply: attrlen %u > page_len %u\n",
                                        attrlen, xdr->nwords << 2);
-                       return -EINVAL;
                }
        } else
                status = -EOPNOTSUPP;
@@ -6229,7 +6225,8 @@ static int nfs4_xdr_dec_open(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
        status = decode_open(xdr, res);
        if (status)
                goto out;
-       if (decode_getfh(xdr, &res->fh) != 0)
+       status = decode_getfh(xdr, &res->fh);
+       if (status)
                goto out;
        decode_getfattr(xdr, res->f_attr, res->server);
 out:
index 239aff7338eb89ee8c0d4080694178317d84929e..b8eda700584bfbd25086d898a21b3fd03959030d 100644 (file)
@@ -1867,6 +1867,7 @@ static int nfs23_validate_mount_data(void *options,
 
                memcpy(sap, &data->addr, sizeof(data->addr));
                args->nfs_server.addrlen = sizeof(data->addr);
+               args->nfs_server.port = ntohs(data->addr.sin_port);
                if (!nfs_verify_server_address(sap))
                        goto out_no_address;
 
@@ -2564,6 +2565,7 @@ static int nfs4_validate_mount_data(void *options,
                        return -EFAULT;
                if (!nfs_verify_server_address(sap))
                        goto out_no_address;
+               args->nfs_server.port = ntohs(((struct sockaddr_in *)sap)->sin_port);
 
                if (data->auth_flavourlen) {
                        if (data->auth_flavourlen > 1)
index b6ff11825fc8a9c37f8d45ccf01e1fbdc1115868..40780229a03281376d4d449e896745f3f169a0d3 100644 (file)
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -58,7 +58,7 @@ EXPORT_SYMBOL(vfs_getattr);
 int vfs_fstat(unsigned int fd, struct kstat *stat)
 {
        int fput_needed;
-       struct file *f = fget_light(fd, &fput_needed);
+       struct file *f = fget_raw_light(fd, &fput_needed);
        int error = -EBADF;
 
        if (f) {
index 7f3f7ba3df6e7526b78699dd17a7439380611461..d1c6093fd3d3c8a204b6cca9dfc8ced300da075f 100644 (file)
 #include "udf_i.h"
 #include "udf_sb.h"
 
-static int udf_adinicb_readpage(struct file *file, struct page *page)
+static void __udf_adinicb_readpage(struct page *page)
 {
        struct inode *inode = page->mapping->host;
        char *kaddr;
        struct udf_inode_info *iinfo = UDF_I(inode);
 
-       BUG_ON(!PageLocked(page));
-
        kaddr = kmap(page);
-       memset(kaddr, 0, PAGE_CACHE_SIZE);
        memcpy(kaddr, iinfo->i_ext.i_data + iinfo->i_lenEAttr, inode->i_size);
+       memset(kaddr + inode->i_size, 0, PAGE_CACHE_SIZE - inode->i_size);
        flush_dcache_page(page);
        SetPageUptodate(page);
        kunmap(page);
+}
+
+static int udf_adinicb_readpage(struct file *file, struct page *page)
+{
+       BUG_ON(!PageLocked(page));
+       __udf_adinicb_readpage(page);
        unlock_page(page);
 
        return 0;
@@ -77,6 +81,25 @@ static int udf_adinicb_writepage(struct page *page,
        return 0;
 }
 
+static int udf_adinicb_write_begin(struct file *file,
+                       struct address_space *mapping, loff_t pos,
+                       unsigned len, unsigned flags, struct page **pagep,
+                       void **fsdata)
+{
+       struct page *page;
+
+       if (WARN_ON_ONCE(pos >= PAGE_CACHE_SIZE))
+               return -EIO;
+       page = grab_cache_page_write_begin(mapping, 0, flags);
+       if (!page)
+               return -ENOMEM;
+       *pagep = page;
+
+       if (!PageUptodate(page) && len != PAGE_CACHE_SIZE)
+               __udf_adinicb_readpage(page);
+       return 0;
+}
+
 static int udf_adinicb_write_end(struct file *file,
                        struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned copied,
@@ -98,8 +121,8 @@ static int udf_adinicb_write_end(struct file *file,
 const struct address_space_operations udf_adinicb_aops = {
        .readpage       = udf_adinicb_readpage,
        .writepage      = udf_adinicb_writepage,
-       .write_begin = simple_write_begin,
-       .write_end = udf_adinicb_write_end,
+       .write_begin    = udf_adinicb_write_begin,
+       .write_end      = udf_adinicb_write_end,
 };
 
 static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
index 64ec644808bcb82951f38eaa682cb536e605d7bf..555d0337ad955bbdccd6a7f5b261e9d0777462c3 100644 (file)
@@ -3,7 +3,6 @@
 
 #include <linux/kernel.h>
 #include <linux/cpu.h>
-#include <linux/cpuidle.h>
 #include <linux/thermal.h>
 #include <asm/acpi.h>
 
@@ -59,13 +58,11 @@ struct acpi_processor_cx {
        u8 entry_method;
        u8 index;
        u32 latency;
-       u32 power;
        u8 bm_sts_skip;
        char desc[ACPI_CX_DESC_LEN];
 };
 
 struct acpi_processor_power {
-       struct cpuidle_device dev;
        struct acpi_processor_cx *state;
        unsigned long bm_check_timestamp;
        u32 default_state;
@@ -325,12 +322,10 @@ extern void acpi_processor_reevaluate_tstate(struct acpi_processor *pr,
 extern const struct file_operations acpi_processor_throttling_fops;
 extern void acpi_processor_throttling_init(void);
 /* in processor_idle.c */
-int acpi_processor_power_init(struct acpi_processor *pr,
-                             struct acpi_device *device);
+int acpi_processor_power_init(struct acpi_processor *pr);
+int acpi_processor_power_exit(struct acpi_processor *pr);
 int acpi_processor_cst_has_changed(struct acpi_processor *pr);
 int acpi_processor_hotplug(struct acpi_processor *pr);
-int acpi_processor_power_exit(struct acpi_processor *pr,
-                             struct acpi_device *device);
 int acpi_processor_suspend(struct device *dev);
 int acpi_processor_resume(struct device *dev);
 extern struct cpuidle_driver acpi_idle_driver;
index bdf0152cbbe95b2747f0736366f520eabb193ea9..f4621184a9b404f8f7a81dfb130258def67cbd58 100644 (file)
 #define DRM_FORMAT_NV16                fourcc_code('N', 'V', '1', '6') /* 2x1 subsampled Cr:Cb plane */
 #define DRM_FORMAT_NV61                fourcc_code('N', 'V', '6', '1') /* 2x1 subsampled Cb:Cr plane */
 
-/* 2 non contiguous plane YCbCr */
-#define DRM_FORMAT_NV12M       fourcc_code('N', 'M', '1', '2') /* 2x2 subsampled Cr:Cb plane */
+/* special NV12 tiled format */
 #define DRM_FORMAT_NV12MT      fourcc_code('T', 'M', '1', '2') /* 2x2 subsampled Cr:Cb plane 64x32 macroblocks */
 
 /*
 #define DRM_FORMAT_YUV444      fourcc_code('Y', 'U', '2', '4') /* non-subsampled Cb (1) and Cr (2) planes */
 #define DRM_FORMAT_YVU444      fourcc_code('Y', 'V', '2', '4') /* non-subsampled Cr (1) and Cb (2) planes */
 
-/* 3 non contiguous plane YCbCr */
-#define DRM_FORMAT_YUV420M     fourcc_code('Y', 'M', '1', '2') /* 2x2 subsampled Cb (1) and Cr (2) planes */
-
 #endif /* DRM_FOURCC_H */
index 06023393fba97cfc069ae119f66d84b1bdb0b256..4eb31752e2b77592e8a2fdbf3fde779d4851504d 100644 (file)
@@ -3,6 +3,7 @@
 
 #include <linux/platform_device.h>
 #include <linux/list.h>
+#include <linux/io.h>
 
 struct ssc_device {
        struct list_head        list;
index acba894374a1537b4b961eee222e9c28b53845cf..8a7096fcb01ee1354e1d46c67d5d1ff9919bc742 100644 (file)
@@ -97,6 +97,8 @@ struct clock_event_device {
        void                    (*broadcast)(const struct cpumask *mask);
        void                    (*set_mode)(enum clock_event_mode mode,
                                            struct clock_event_device *);
+       void                    (*suspend)(struct clock_event_device *);
+       void                    (*resume)(struct clock_event_device *);
        unsigned long           min_delta_ticks;
        unsigned long           max_delta_ticks;
 
@@ -156,6 +158,9 @@ clockevents_calc_mult_shift(struct clock_event_device *ce, u32 freq, u32 minsec)
                                      freq, minsec);
 }
 
+extern void clockevents_suspend(void);
+extern void clockevents_resume(void);
+
 #ifdef CONFIG_GENERIC_CLOCKEVENTS
 extern void clockevents_notify(unsigned long reason, void *arg);
 #else
@@ -164,6 +169,9 @@ extern void clockevents_notify(unsigned long reason, void *arg);
 
 #else /* CONFIG_GENERIC_CLOCKEVENTS_BUILD */
 
+static inline void clockevents_suspend(void) {}
+static inline void clockevents_resume(void) {}
+
 #define clockevents_notify(reason, arg) do { } while (0)
 
 #endif
index 52a5f15a2223ecb916391138ca9cb44f5d5d108a..86529e642d6c047ac2399ba0c21c69e229adf755 100644 (file)
@@ -772,6 +772,13 @@ static inline void pm_suspend_ignore_children(struct device *dev, bool enable)
        dev->power.ignore_children = enable;
 }
 
+static inline void dev_pm_syscore_device(struct device *dev, bool val)
+{
+#ifdef CONFIG_PM_SLEEP
+       dev->power.syscore = val;
+#endif
+}
+
 static inline void device_lock(struct device *dev)
 {
        mutex_lock(&dev->mutex);
index 1bc74afe7a35c7ea248510874910525d0206c9c3..49ed17fdf0556436cd626e52e0a9cd9809bc610a 100644 (file)
@@ -22,6 +22,7 @@ struct i2c_pnx_mif {
        struct timer_list       timer;          /* Timeout */
        u8 *                    buf;            /* Data buffer */
        int                     len;            /* Length of data buffer */
+       int                     order;          /* RX Bytes to order via TX */
 };
 
 struct i2c_pnx_algo_data {
index 604382143bcfccd6772260ed56e16589800af83c..594b419b7d20229cf79715b1250124491abe4ca6 100644 (file)
        __x - (__x % (y));                              \
 }                                                      \
 )
+
+/*
+ * Divide positive or negative dividend by positive divisor and round
+ * to closest integer. Result is undefined for negative divisors.
+ */
 #define DIV_ROUND_CLOSEST(x, divisor)(                 \
 {                                                      \
-       typeof(divisor) __divisor = divisor;            \
-       (((x) + ((__divisor) / 2)) / (__divisor));      \
+       typeof(x) __x = x;                              \
+       typeof(divisor) __d = divisor;                  \
+       (((typeof(x))-1) >= 0 || (__x) >= 0) ?          \
+               (((__x) + ((__d) / 2)) / (__d)) :       \
+               (((__x) - ((__d) / 2)) / (__d));        \
 }                                                      \
 )
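/*
 * Illustrative aside, not part of the patch above: the reworked
 * DIV_ROUND_CLOSEST() adds or subtracts half the divisor, depending
 * on the sign of the dividend (or the unsignedness of its type),
 * before the truncating division, so negative dividends now round to
 * the nearest integer as well.  With a divisor of 4:
 *
 *	DIV_ROUND_CLOSEST(5, 4)  ->  (5 + 2) / 4 ==  1   (exact: 1.25)
 *	DIV_ROUND_CLOSEST(-5, 4) -> (-5 - 2) / 4 == -1   (exact: -1.25)
 *
 * whereas the old definition evaluated (-5 + 2) / 4 == 0 for the
 * second case, i.e. it rounded away from the nearest integer.
 */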
 
index fc615a97e2d363df686f6111988cf686d8e03dfa..1e57449395b16db43ecfb638b71acc533e9ec73b 100644 (file)
@@ -224,7 +224,7 @@ static inline int kobject_uevent_env(struct kobject *kobj,
 
 static inline __printf(2, 3)
 int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...)
-{ return 0; }
+{ return -ENOMEM; }
 
 static inline int kobject_action_type(const char *buf, size_t count,
                                      enum kobject_action *type)
index d0752eca9b4495011f6f82b20ba4e487b06ad6da..9d96d5d4dfed30f19c5ddeb04bacacd44f08bd45 100644 (file)
@@ -183,7 +183,7 @@ extern int  mISDN_initbchannel(struct bchannel *, unsigned short,
                                   unsigned short);
 extern int     mISDN_freedchannel(struct dchannel *);
 extern void    mISDN_clear_bchannel(struct bchannel *);
-extern int     mISDN_freebchannel(struct bchannel *);
+extern void    mISDN_freebchannel(struct bchannel *);
 extern int     mISDN_ctrl_bchannel(struct bchannel *, struct mISDN_ctrl_req *);
 extern void    queue_ch_frame(struct mISDNchannel *, u_int,
                        int, struct sk_buff *);
index 3a8435a8058f1cec9357b3f980efb0eae18f5569..cebe97ee98b86e1d6f6ef478ddf84693d8486d6b 100644 (file)
@@ -16,6 +16,8 @@
 
 #include <linux/platform_device.h>
 
+struct irq_domain;
+
 /*
  * This struct describes the MFD part ("cell").
  * After registration the copy of this structure will become the platform data
@@ -98,7 +100,7 @@ static inline const struct mfd_cell *mfd_get_cell(struct platform_device *pdev)
 extern int mfd_add_devices(struct device *parent, int id,
                           struct mfd_cell *cells, int n_devs,
                           struct resource *mem_base,
-                          int irq_base);
+                          int irq_base, struct irq_domain *irq_domain);
 
 extern void mfd_remove_devices(struct device *parent);
 
index 12c06870829af2add4caed14a9ea57f069b7479f..7cd83d826ed82285099c55cc7008dc558feddcb2 100644 (file)
@@ -22,6 +22,9 @@
 #include <linux/regulator/driver.h>
 #include <linux/regulator/machine.h>
 
+/* TPS chip id list */
+#define TPS65217                       0xF0
+
 /* I2C ID for TPS65217 part */
 #define TPS65217_I2C_ID                        0x24
 
@@ -248,13 +251,11 @@ struct tps_info {
 struct tps65217 {
        struct device *dev;
        struct tps65217_board *pdata;
+       unsigned int id;
        struct regulator_desc desc[TPS65217_NUM_REGULATOR];
        struct regulator_dev *rdev[TPS65217_NUM_REGULATOR];
        struct tps_info *info[TPS65217_NUM_REGULATOR];
        struct regmap *regmap;
-
-       /* Client devices */
-       struct platform_device *regulator_pdev[TPS65217_NUM_REGULATOR];
 };
 
 static inline struct tps65217 *dev_to_tps65217(struct device *dev)
@@ -262,6 +263,11 @@ static inline struct tps65217 *dev_to_tps65217(struct device *dev)
        return dev_get_drvdata(dev);
 }
 
+static inline int tps65217_chip_id(struct tps65217 *tps65217)
+{
+       return tps65217->id;
+}
+
 int tps65217_reg_read(struct tps65217 *tps, unsigned int reg,
                                        unsigned int *val);
 int tps65217_reg_write(struct tps65217 *tps, unsigned int reg,
index bd6c9fcdf2dd30c29b582a38e5f5c3f1eb320b62..6e1b0f973a03511b398154a5d42f3a9174b9268a 100644 (file)
@@ -796,6 +796,19 @@ enum mlx4_net_trans_rule_id {
        MLX4_NET_TRANS_RULE_NUM, /* should be last */
 };
 
+extern const u16 __sw_id_hw[];
+
+static inline int map_hw_to_sw_id(u16 header_id)
+{
+
+       int i;
+       for (i = 0; i < MLX4_NET_TRANS_RULE_NUM; i++) {
+               if (header_id == __sw_id_hw[i])
+                       return i;
+       }
+       return -EINVAL;
+}
+
 enum mlx4_net_trans_promisc_mode {
        MLX4_FS_PROMISC_NONE = 0,
        MLX4_FS_PROMISC_UPLINK,
index 111aca5e97f3d1801ace7ed7c009af4165a3869a..4b27f9f503e4a25a18f664ea7bd84c9b385d11be 100644 (file)
@@ -239,6 +239,7 @@ struct mmc_card {
 #define MMC_QUIRK_BLK_NO_CMD23 (1<<7)          /* Avoid CMD23 for regular multiblock */
 #define MMC_QUIRK_BROKEN_BYTE_MODE_512 (1<<8)  /* Avoid sending 512 bytes in */
 #define MMC_QUIRK_LONG_READ_TIME (1<<9)                /* Data read time > CSD says */
+#define MMC_QUIRK_SEC_ERASE_TRIM_BROKEN (1<<10)        /* Skip secure for erase/trim */
                                                /* byte mode */
        unsigned int    poweroff_notify_state;  /* eMMC4.5 notify feature */
 #define MMC_NO_POWER_NOTIFICATION      0
index 1f8fc7f9bcd8b8eb0d07588ba671b9327e53fe90..4b03f56e280eb9e59f236806ce24ce36e435c9c7 100644 (file)
@@ -265,11 +265,6 @@ static inline const struct nfs_rpc_ops *NFS_PROTO(const struct inode *inode)
        return NFS_SERVER(inode)->nfs_client->rpc_ops;
 }
 
-static inline __be32 *NFS_COOKIEVERF(const struct inode *inode)
-{
-       return NFS_I(inode)->cookieverf;
-}
-
 static inline unsigned NFS_MINATTRTIMEO(const struct inode *inode)
 {
        struct nfs_server *nfss = NFS_SERVER(inode);
index ac7c8ae254f251933e48f04d5c877eaaa3ec6e09..be9cf3c7e79ec0afcc0e024f6b95da5ab2bbd97e 100644 (file)
@@ -652,7 +652,7 @@ struct nfs_getaclargs {
 };
 
 /* getxattr ACL interface flags */
-#define NFS4_ACL_LEN_REQUEST   0x0001  /* zero length getxattr buffer */
+#define NFS4_ACL_TRUNC         0x0001  /* ACL was truncated */
 struct nfs_getaclres {
        size_t                          acl_len;
        size_t                          acl_data_offset;
index 2a4e5faee904fcfd85fd54eba39901978bdeecf1..214e0ebcb84d85a80b31ee98bf4647eb25fd538c 100644 (file)
@@ -48,6 +48,14 @@ int opp_disable(struct device *dev, unsigned long freq);
 
 struct srcu_notifier_head *opp_get_notifier(struct device *dev);
 
+#ifdef CONFIG_OF
+int of_init_opp_table(struct device *dev);
+#else
+static inline int of_init_opp_table(struct device *dev)
+{
+       return -EINVAL;
+}
+#endif /* CONFIG_OF */
 #else
 static inline unsigned long opp_get_voltage(struct opp *opp)
 {
index fc35260773489d55724f87d3493f5d5b23ab1f57..6b4565c440c8795eecf21044e9a8dc76b630687d 100644 (file)
 #define PCI_DEVICE_ID_TIGON3_5704S     0x16a8
 #define PCI_DEVICE_ID_NX2_57800_VF     0x16a9
 #define PCI_DEVICE_ID_NX2_5706S                0x16aa
-#define PCI_DEVICE_ID_NX2_57840_MF     0x16ab
+#define PCI_DEVICE_ID_NX2_57840_MF     0x16a4
 #define PCI_DEVICE_ID_NX2_5708S                0x16ac
 #define PCI_DEVICE_ID_NX2_57840_VF     0x16ad
 #define PCI_DEVICE_ID_NX2_57810_MF     0x16ae
index 7602ccb3f40ec672001be2eae9be3395604493f2..33ed9d605f9195eafaaeec43b7bf1d577dd8d59a 100644 (file)
@@ -926,7 +926,7 @@ struct perf_event {
        struct hw_perf_event            hw;
 
        struct perf_event_context       *ctx;
-       struct file                     *filp;
+       atomic_long_t                   refcount;
 
        /*
         * These accumulate total time (in nanoseconds) that children
@@ -1296,6 +1296,7 @@ extern int perf_swevent_get_recursion_context(void);
 extern void perf_swevent_put_recursion_context(int rctx);
 extern void perf_event_enable(struct perf_event *event);
 extern void perf_event_disable(struct perf_event *event);
+extern int __perf_event_disable(void *info);
 extern void perf_event_task_tick(void);
 #else
 static inline void
@@ -1334,6 +1335,7 @@ static inline int  perf_swevent_get_recursion_context(void)               { return -1; }
 static inline void perf_swevent_put_recursion_context(int rctx)                { }
 static inline void perf_event_enable(struct perf_event *event)         { }
 static inline void perf_event_disable(struct perf_event *event)                { }
+static inline int __perf_event_disable(void *info)                     { return -1; }
 static inline void perf_event_task_tick(void)                          { }
 #endif
 
index f067e60a38322fa3a935249cc75c185a8bf522af..44d1f2307dbc3a9d21ed0bb6a0e1948e91415b1a 100644 (file)
@@ -510,12 +510,14 @@ struct dev_pm_info {
        bool                    is_prepared:1;  /* Owned by the PM core */
        bool                    is_suspended:1; /* Ditto */
        bool                    ignore_children:1;
+       bool                    early_init:1;   /* Owned by the PM core */
        spinlock_t              lock;
 #ifdef CONFIG_PM_SLEEP
        struct list_head        entry;
        struct completion       completion;
        struct wakeup_source    *wakeup;
        bool                    wakeup_path:1;
+       bool                    syscore:1;
 #else
        unsigned int            should_wakeup:1;
 #endif
index a7d6172922d405c950c701bf4a7d2d21bb4e0c77..7c1d252b20c08de0ec725976c67b6fc13326e2e0 100644 (file)
@@ -114,7 +114,6 @@ struct generic_pm_domain_data {
        struct mutex lock;
        unsigned int refcount;
        bool need_restore;
-       bool always_on;
 };
 
 #ifdef CONFIG_PM_GENERIC_DOMAINS
@@ -139,36 +138,32 @@ extern int __pm_genpd_of_add_device(struct device_node *genpd_node,
                                    struct device *dev,
                                    struct gpd_timing_data *td);
 
-static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
-                                     struct device *dev)
-{
-       return __pm_genpd_add_device(genpd, dev, NULL);
-}
-
-static inline int pm_genpd_of_add_device(struct device_node *genpd_node,
-                                        struct device *dev)
-{
-       return __pm_genpd_of_add_device(genpd_node, dev, NULL);
-}
+extern int __pm_genpd_name_add_device(const char *domain_name,
+                                     struct device *dev,
+                                     struct gpd_timing_data *td);
 
 extern int pm_genpd_remove_device(struct generic_pm_domain *genpd,
                                  struct device *dev);
-extern void pm_genpd_dev_always_on(struct device *dev, bool val);
 extern void pm_genpd_dev_need_restore(struct device *dev, bool val);
 extern int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
                                  struct generic_pm_domain *new_subdomain);
+extern int pm_genpd_add_subdomain_names(const char *master_name,
+                                       const char *subdomain_name);
 extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
                                     struct generic_pm_domain *target);
 extern int pm_genpd_add_callbacks(struct device *dev,
                                  struct gpd_dev_ops *ops,
                                  struct gpd_timing_data *td);
 extern int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td);
-extern int genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state);
-extern int genpd_detach_cpuidle(struct generic_pm_domain *genpd);
+extern int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state);
+extern int pm_genpd_name_attach_cpuidle(const char *name, int state);
+extern int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd);
+extern int pm_genpd_name_detach_cpuidle(const char *name);
 extern void pm_genpd_init(struct generic_pm_domain *genpd,
                          struct dev_power_governor *gov, bool is_off);
 
 extern int pm_genpd_poweron(struct generic_pm_domain *genpd);
+extern int pm_genpd_name_poweron(const char *domain_name);
 
 extern bool default_stop_ok(struct device *dev);
 
@@ -189,8 +184,15 @@ static inline int __pm_genpd_add_device(struct generic_pm_domain *genpd,
 {
        return -ENOSYS;
 }
-static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
-                                     struct device *dev)
+static inline int __pm_genpd_of_add_device(struct device_node *genpd_node,
+                                          struct device *dev,
+                                          struct gpd_timing_data *td)
+{
+       return -ENOSYS;
+}
+static inline int __pm_genpd_name_add_device(const char *domain_name,
+                                            struct device *dev,
+                                            struct gpd_timing_data *td)
 {
        return -ENOSYS;
 }
@@ -199,13 +201,17 @@ static inline int pm_genpd_remove_device(struct generic_pm_domain *genpd,
 {
        return -ENOSYS;
 }
-static inline void pm_genpd_dev_always_on(struct device *dev, bool val) {}
 static inline void pm_genpd_dev_need_restore(struct device *dev, bool val) {}
 static inline int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
                                         struct generic_pm_domain *new_sd)
 {
        return -ENOSYS;
 }
+static inline int pm_genpd_add_subdomain_names(const char *master_name,
+                                              const char *subdomain_name)
+{
+       return -ENOSYS;
+}
 static inline int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
                                            struct generic_pm_domain *target)
 {
@@ -221,11 +227,19 @@ static inline int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
 {
        return -ENOSYS;
 }
-static inline int genpd_attach_cpuidle(struct generic_pm_domain *genpd, int st)
+static inline int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int st)
 {
        return -ENOSYS;
 }
-static inline int genpd_detach_cpuidle(struct generic_pm_domain *genpd)
+static inline int pm_genpd_name_attach_cpuidle(const char *name, int state)
+{
+       return -ENOSYS;
+}
+static inline int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
+{
+       return -ENOSYS;
+}
+static inline int pm_genpd_name_detach_cpuidle(const char *name)
 {
        return -ENOSYS;
 }
@@ -237,6 +251,10 @@ static inline int pm_genpd_poweron(struct generic_pm_domain *genpd)
 {
        return -ENOSYS;
 }
+static inline int pm_genpd_name_poweron(const char *domain_name)
+{
+       return -ENOSYS;
+}
 static inline bool default_stop_ok(struct device *dev)
 {
        return false;
@@ -245,6 +263,24 @@ static inline bool default_stop_ok(struct device *dev)
 #define pm_domain_always_on_gov NULL
 #endif
 
+static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
+                                     struct device *dev)
+{
+       return __pm_genpd_add_device(genpd, dev, NULL);
+}
+
+static inline int pm_genpd_of_add_device(struct device_node *genpd_node,
+                                        struct device *dev)
+{
+       return __pm_genpd_of_add_device(genpd_node, dev, NULL);
+}
+
+static inline int pm_genpd_name_add_device(const char *domain_name,
+                                          struct device *dev)
+{
+       return __pm_genpd_name_add_device(domain_name, dev, NULL);
+}
+
 static inline int pm_genpd_remove_callbacks(struct device *dev)
 {
        return __pm_genpd_remove_callbacks(dev, true);
@@ -258,4 +294,20 @@ static inline void genpd_queue_power_off_work(struct generic_pm_domain *gpd) {}
 static inline void pm_genpd_poweroff_unused(void) {}
 #endif
 
+#ifdef CONFIG_PM_GENERIC_DOMAINS_SLEEP
+extern void pm_genpd_syscore_switch(struct device *dev, bool suspend);
+#else
+static inline void pm_genpd_syscore_switch(struct device *dev, bool suspend) {}
+#endif
+
+static inline void pm_genpd_syscore_poweroff(struct device *dev)
+{
+       pm_genpd_syscore_switch(dev, true);
+}
+
+static inline void pm_genpd_syscore_poweron(struct device *dev)
+{
+       pm_genpd_syscore_switch(dev, false);
+}
+
 #endif /* _LINUX_PM_DOMAIN_H */
index b8c86648a2f95dc6f83aab668fd9a7e07f277b4e..23bddac4bad8d08f3781d1e8a453aa41edb28632 100644 (file)
@@ -954,7 +954,6 @@ struct sched_domain {
        unsigned int smt_gain;
        int flags;                      /* See SD_* */
        int level;
-       int idle_buddy;                 /* cpu assigned to select_idle_sibling() */
 
        /* Runtime fields. */
        unsigned long last_balance;     /* init to jiffies. units in jiffies */
index cff40aa7db625bbb9dafd6b5842d3dc70276c682..bf8c49ff7530c7ee8b85d4a8b3ccef5de86ff308 100644 (file)
@@ -114,6 +114,7 @@ struct rpc_xprt_ops {
        void            (*set_buffer_size)(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize);
        int             (*reserve_xprt)(struct rpc_xprt *xprt, struct rpc_task *task);
        void            (*release_xprt)(struct rpc_xprt *xprt, struct rpc_task *task);
+       void            (*alloc_slot)(struct rpc_xprt *xprt, struct rpc_task *task);
        void            (*rpcbind)(struct rpc_task *task);
        void            (*set_port)(struct rpc_xprt *xprt, unsigned short port);
        void            (*connect)(struct rpc_task *task);
@@ -281,6 +282,8 @@ void                        xprt_connect(struct rpc_task *task);
 void                   xprt_reserve(struct rpc_task *task);
 int                    xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
 int                    xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
+void                   xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task);
+void                   xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task);
 int                    xprt_prepare_transmit(struct rpc_task *task);
 void                   xprt_transmit(struct rpc_task *task);
 void                   xprt_end_transmit(struct rpc_task *task);
index ca356a7349202272236b9d7db421f6d8804d89a5..8b27927b2a55de3dfd5f94c5a40ef3c3b886eb06 100644 (file)
@@ -136,7 +136,7 @@ struct smp_chan {
 };
 
 /* SMP Commands */
-int smp_conn_security(struct l2cap_conn *conn, __u8 sec_level);
+int smp_conn_security(struct hci_conn *hcon, __u8 sec_level);
 int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb);
 int smp_distribute_keys(struct l2cap_conn *conn, __u8 force);
 int smp_user_confirm_reply(struct hci_conn *conn, u16 mgmt_op, __le32 passkey);
index e1ce1048fe5fa1142196cb88e08829b7bb1d60f5..4a045cda9c60c75a96b956b051dfbf5a6d6581b7 100644 (file)
@@ -18,6 +18,7 @@ struct nf_conntrack_ecache {
        u16 ctmask;             /* bitmask of ct events to be delivered */
        u16 expmask;            /* bitmask of expect events to be delivered */
        u32 pid;                /* netlink pid of destroyer */
+       struct timer_list timeout;
 };
 
 static inline struct nf_conntrack_ecache *
index 976a81abe1a231de348085a92c8f3110a21fc3fa..639dd1316d375aeb2802c73032cc4cae6dcb8b0c 100644 (file)
@@ -273,6 +273,9 @@ struct xfrm_replay {
        int     (*check)(struct xfrm_state *x,
                         struct sk_buff *skb,
                         __be32 net_seq);
+       int     (*recheck)(struct xfrm_state *x,
+                          struct sk_buff *skb,
+                          __be32 net_seq);
        void    (*notify)(struct xfrm_state *x, int event);
        int     (*overflow)(struct xfrm_state *x, struct sk_buff *skb);
 };
index f1405d335a968d093aadafee06ab4be105970f88..941c84bf1065f406cfbc3eeb5f4b09e4fdcd58ec 100644 (file)
@@ -23,7 +23,9 @@ struct se_subsystem_api {
        struct se_device *(*create_virtdevice)(struct se_hba *,
                                struct se_subsystem_dev *, void *);
        void (*free_device)(void *);
-       int (*transport_complete)(struct se_cmd *cmd, struct scatterlist *);
+       void (*transport_complete)(struct se_cmd *cmd,
+                                  struct scatterlist *,
+                                  unsigned char *);
 
        int (*parse_cdb)(struct se_cmd *cmd);
        ssize_t (*check_configfs_dev_params)(struct se_hba *,
index 015cea01ae39bedd1a1bf796e23a8f90f061e5e2..5be89373ceac659c92e671a463c80f146656c416 100644 (file)
 
 #define SE_INQUIRY_BUF                         512
 #define SE_MODE_PAGE_BUF                       512
+#define SE_SENSE_BUF                           96
 
 /* struct se_hba->hba_flags */
 enum hba_flags_table {
index b7935fcec7d923b0b0b89fe0fe7dfdf61967b447..7fee567153f022cc2a097135a524bf393887e1a2 100644 (file)
@@ -1253,7 +1253,7 @@ retry:
 /*
  * Cross CPU call to disable a performance event
  */
-static int __perf_event_disable(void *info)
+int __perf_event_disable(void *info)
 {
        struct perf_event *event = info;
        struct perf_event_context *ctx = event->ctx;
@@ -2935,12 +2935,12 @@ EXPORT_SYMBOL_GPL(perf_event_release_kernel);
 /*
  * Called when the last reference to the file is gone.
  */
-static int perf_release(struct inode *inode, struct file *file)
+static void put_event(struct perf_event *event)
 {
-       struct perf_event *event = file->private_data;
        struct task_struct *owner;
 
-       file->private_data = NULL;
+       if (!atomic_long_dec_and_test(&event->refcount))
+               return;
 
        rcu_read_lock();
        owner = ACCESS_ONCE(event->owner);
@@ -2975,7 +2975,13 @@ static int perf_release(struct inode *inode, struct file *file)
                put_task_struct(owner);
        }
 
-       return perf_event_release_kernel(event);
+       perf_event_release_kernel(event);
+}
+
+static int perf_release(struct inode *inode, struct file *file)
+{
+       put_event(file->private_data);
+       return 0;
 }
 
 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
@@ -3227,7 +3233,7 @@ unlock:
 
 static const struct file_operations perf_fops;
 
-static struct perf_event *perf_fget_light(int fd, int *fput_needed)
+static struct file *perf_fget_light(int fd, int *fput_needed)
 {
        struct file *file;
 
@@ -3241,7 +3247,7 @@ static struct perf_event *perf_fget_light(int fd, int *fput_needed)
                return ERR_PTR(-EBADF);
        }
 
-       return file->private_data;
+       return file;
 }
 
 static int perf_event_set_output(struct perf_event *event,
@@ -3273,19 +3279,21 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
        case PERF_EVENT_IOC_SET_OUTPUT:
        {
+               struct file *output_file = NULL;
                struct perf_event *output_event = NULL;
                int fput_needed = 0;
                int ret;
 
                if (arg != -1) {
-                       output_event = perf_fget_light(arg, &fput_needed);
-                       if (IS_ERR(output_event))
-                               return PTR_ERR(output_event);
+                       output_file = perf_fget_light(arg, &fput_needed);
+                       if (IS_ERR(output_file))
+                               return PTR_ERR(output_file);
+                       output_event = output_file->private_data;
                }
 
                ret = perf_event_set_output(event, output_event);
                if (output_event)
-                       fput_light(output_event->filp, fput_needed);
+                       fput_light(output_file, fput_needed);
 
                return ret;
        }
@@ -5950,6 +5958,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 
        mutex_init(&event->mmap_mutex);
 
+       atomic_long_set(&event->refcount, 1);
        event->cpu              = cpu;
        event->attr             = *attr;
        event->group_leader     = group_leader;
@@ -6260,12 +6269,12 @@ SYSCALL_DEFINE5(perf_event_open,
                return event_fd;
 
        if (group_fd != -1) {
-               group_leader = perf_fget_light(group_fd, &fput_needed);
-               if (IS_ERR(group_leader)) {
-                       err = PTR_ERR(group_leader);
+               group_file = perf_fget_light(group_fd, &fput_needed);
+               if (IS_ERR(group_file)) {
+                       err = PTR_ERR(group_file);
                        goto err_fd;
                }
-               group_file = group_leader->filp;
+               group_leader = group_file->private_data;
                if (flags & PERF_FLAG_FD_OUTPUT)
                        output_event = group_leader;
                if (flags & PERF_FLAG_FD_NO_GROUP)
@@ -6402,7 +6411,6 @@ SYSCALL_DEFINE5(perf_event_open,
                put_ctx(gctx);
        }
 
-       event->filp = event_file;
        WARN_ON_ONCE(ctx->parent_ctx);
        mutex_lock(&ctx->mutex);
 
@@ -6496,7 +6504,6 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
                goto err_free;
        }
 
-       event->filp = NULL;
        WARN_ON_ONCE(ctx->parent_ctx);
        mutex_lock(&ctx->mutex);
        perf_install_in_context(ctx, event, cpu);
@@ -6578,7 +6585,7 @@ static void sync_child_event(struct perf_event *child_event,
         * Release the parent event, if this was the last
         * reference to it.
         */
-       fput(parent_event->filp);
+       put_event(parent_event);
 }
 
 static void
@@ -6654,9 +6661,8 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
         *
         *   __perf_event_exit_task()
         *     sync_child_event()
-        *       fput(parent_event->filp)
-        *         perf_release()
-        *           mutex_lock(&ctx->mutex)
+        *       put_event()
+        *         mutex_lock(&ctx->mutex)
         *
         * But since its the parent context it won't be the same instance.
         */
@@ -6724,7 +6730,7 @@ static void perf_free_event(struct perf_event *event,
        list_del_init(&event->child_list);
        mutex_unlock(&parent->child_mutex);
 
-       fput(parent->filp);
+       put_event(parent);
 
        perf_group_detach(event);
        list_del_event(event, ctx);
@@ -6804,6 +6810,12 @@ inherit_event(struct perf_event *parent_event,
                                           NULL, NULL);
        if (IS_ERR(child_event))
                return child_event;
+
+       if (!atomic_long_inc_not_zero(&parent_event->refcount)) {
+               free_event(child_event);
+               return NULL;
+       }
+
        get_ctx(child_ctx);
 
        /*
@@ -6844,14 +6856,6 @@ inherit_event(struct perf_event *parent_event,
        add_event_to_ctx(child_event, child_ctx);
        raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
 
-       /*
-        * Get a reference to the parent filp - we will fput it
-        * when the child event exits. This is safe to do because
-        * we are in the parent and we know that the filp still
-        * exists and has a nonzero count:
-        */
-       atomic_long_inc(&parent_event->filp->f_count);
-
        /*
         * Link this into the parent event's child list
         */
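/*
 * Illustrative aside, not part of the patch above: the perf_event
 * lifetime change follows the usual get/put refcount pattern, where
 * the creator holds the initial reference and later users may only
 * take extra references while the object is still live (which is what
 * atomic_long_inc_not_zero() guarantees in inherit_event()).  Below is
 * a minimal standalone sketch of that pattern; struct obj and the
 * obj_*() helpers are made-up names for illustration, not kernel APIs.
 */
#include <linux/atomic.h>
#include <linux/slab.h>
#include <linux/types.h>

struct obj {
	atomic_long_t refcount;		/* starts at 1 for the creator */
};

static struct obj *obj_alloc(void)
{
	struct obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

	if (o)
		atomic_long_set(&o->refcount, 1);
	return o;
}

static bool obj_tryget(struct obj *o)
{
	/* Fails once the final reference has already been dropped. */
	return atomic_long_inc_not_zero(&o->refcount);
}

static void obj_put(struct obj *o)
{
	/* The last put frees the object. */
	if (atomic_long_dec_and_test(&o->refcount))
		kfree(o);
}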
index bb38c4d3ee129ab06c1b46dc295ca6864c39c416..9a7b487c6fe240c1a2e4f5c70ef68da6370ebf78 100644 (file)
@@ -453,7 +453,16 @@ int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *att
        int old_type = bp->attr.bp_type;
        int err = 0;
 
-       perf_event_disable(bp);
+       /*
+        * modify_user_hw_breakpoint can be invoked with IRQs disabled and hence it
+        * will not be possible to raise IPIs that invoke __perf_event_disable.
+        * So call the function directly after making sure we are targeting the
+        * current task.
+        */
+       if (irqs_disabled() && bp->ctx && bp->ctx->task == current)
+               __perf_event_disable(bp);
+       else
+               perf_event_disable(bp);
 
        bp->attr.bp_addr = attr->bp_addr;
        bp->attr.bp_type = attr->bp_type;
index a70518c9d82f42c57eb400208ce95dd4dd2aafaa..5dfdc9ea180b8ac497ed915c45b809fb65799261 100644 (file)
@@ -263,6 +263,10 @@ config PM_GENERIC_DOMAINS
        bool
        depends on PM
 
+config PM_GENERIC_DOMAINS_SLEEP
+       def_bool y
+       depends on PM_SLEEP && PM_GENERIC_DOMAINS
+
 config PM_GENERIC_DOMAINS_RUNTIME
        def_bool y
        depends on PM_RUNTIME && PM_GENERIC_DOMAINS
index d52359374e8501e8c1f4e8a88427468249509fb3..68197a4e8fc95e9f9bac2c609d0b69909d8961f2 100644 (file)
@@ -37,7 +37,7 @@ static struct sysrq_key_op    sysrq_poweroff_op = {
        .enable_mask    = SYSRQ_ENABLE_BOOT,
 };
 
-static int pm_sysrq_init(void)
+static int __init pm_sysrq_init(void)
 {
        register_sysrq_key('o', &sysrq_poweroff_op);
        return 0;
index 19db29f67558fef712764d78d7feccc0318ed650..87da817f9e132204add38dc73685962081de9dd8 100644 (file)
@@ -79,7 +79,7 @@ static int try_to_freeze_tasks(bool user_only)
 
                /*
                 * We need to retry, but first give the freezing tasks some
-                * time to enter the regrigerator.
+                * time to enter the refrigerator.
                 */
                msleep(10);
        }
index fbf1fd098dc6cca687f0e9296a931aa0425c6fee..649c9f876cb164b0e16683479b59bf4d4f3b794b 100644 (file)
@@ -5304,27 +5304,17 @@ void idle_task_exit(void)
 }
 
 /*
- * While a dead CPU has no uninterruptible tasks queued at this point,
- * it might still have a nonzero ->nr_uninterruptible counter, because
- * for performance reasons the counter is not stricly tracking tasks to
- * their home CPUs. So we just add the counter to another CPU's counter,
- * to keep the global sum constant after CPU-down:
- */
-static void migrate_nr_uninterruptible(struct rq *rq_src)
-{
-       struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask));
-
-       rq_dest->nr_uninterruptible += rq_src->nr_uninterruptible;
-       rq_src->nr_uninterruptible = 0;
-}
-
-/*
- * remove the tasks which were accounted by rq from calc_load_tasks.
+ * Since this CPU is going 'away' for a while, fold any nr_active delta
+ * we might have. Assumes we're called after migrate_tasks() so that the
+ * nr_active count is stable.
+ *
+ * Also see the comment "Global load-average calculations".
  */
-static void calc_global_load_remove(struct rq *rq)
+static void calc_load_migrate(struct rq *rq)
 {
-       atomic_long_sub(rq->calc_load_active, &calc_load_tasks);
-       rq->calc_load_active = 0;
+       long delta = calc_load_fold_active(rq);
+       if (delta)
+               atomic_long_add(delta, &calc_load_tasks);
 }
 
 /*
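
calc_load_migrate() collapses the two removed helpers into a single fold: calc_load_fold_active() reports how much the outgoing CPU's active count changed since it last published a value, and only that delta is added to the global calc_load_tasks. A standalone sketch of the fold-the-delta idea with an invented data layout, not the scheduler's calc_load_* code:

    #include <stdio.h>

    struct cpu_load {
        long nr_active;         /* current local count */
        long reported;          /* what was last folded into the global */
    };

    static long global_tasks;

    static long fold_active(struct cpu_load *c)
    {
        long delta = c->nr_active - c->reported;

        c->reported = c->nr_active;
        return delta;
    }

    static void cpu_going_away(struct cpu_load *c)
    {
        long delta = fold_active(c);    /* publish whatever is left */

        if (delta)
            global_tasks += delta;
    }

    int main(void)
    {
        struct cpu_load c = { .nr_active = 3, .reported = 1 };

        global_tasks = 1;               /* the one task reported earlier */
        cpu_going_away(&c);
        printf("global_tasks = %ld\n", global_tasks);   /* prints 3 */
        return 0;
    }

Because each CPU only ever publishes the change since its last report, summing the deltas keeps the global count consistent even when the CPU then disappears.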
@@ -5352,9 +5342,6 @@ static void migrate_tasks(unsigned int dead_cpu)
         */
        rq->stop = NULL;
 
-       /* Ensure any throttled groups are reachable by pick_next_task */
-       unthrottle_offline_cfs_rqs(rq);
-
        for ( ; ; ) {
                /*
                 * There's this thread running, bail when that's the only
@@ -5618,8 +5605,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
                BUG_ON(rq->nr_running != 1); /* the migration thread */
                raw_spin_unlock_irqrestore(&rq->lock, flags);
 
-               migrate_nr_uninterruptible(rq);
-               calc_global_load_remove(rq);
+               calc_load_migrate(rq);
                break;
 #endif
        }
@@ -6028,11 +6014,6 @@ static void destroy_sched_domains(struct sched_domain *sd, int cpu)
  * SD_SHARE_PKG_RESOURCE set (Last Level Cache Domain) for this
  * allows us to avoid some pointer chasing select_idle_sibling().
  *
- * Iterate domains and sched_groups downward, assigning CPUs to be
- * select_idle_sibling() hw buddy.  Cross-wiring hw makes bouncing
- * due to random perturbation self canceling, ie sw buddies pull
- * their counterpart to their CPU's hw counterpart.
- *
  * Also keep a unique ID per domain (we use the first cpu number in
  * the cpumask of the domain), this allows us to quickly tell if
  * two cpus are in the same cache domain, see cpus_share_cache().
@@ -6046,40 +6027,8 @@ static void update_top_cache_domain(int cpu)
        int id = cpu;
 
        sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
-       if (sd) {
-               struct sched_domain *tmp = sd;
-               struct sched_group *sg, *prev;
-               bool right;
-
-               /*
-                * Traverse to first CPU in group, and count hops
-                * to cpu from there, switching direction on each
-                * hop, never ever pointing the last CPU rightward.
-                */
-               do {
-                       id = cpumask_first(sched_domain_span(tmp));
-                       prev = sg = tmp->groups;
-                       right = 1;
-
-                       while (cpumask_first(sched_group_cpus(sg)) != id)
-                               sg = sg->next;
-
-                       while (!cpumask_test_cpu(cpu, sched_group_cpus(sg))) {
-                               prev = sg;
-                               sg = sg->next;
-                               right = !right;
-                       }
-
-                       /* A CPU went down, never point back to domain start. */
-                       if (right && cpumask_first(sched_group_cpus(sg->next)) == id)
-                               right = false;
-
-                       sg = right ? sg->next : prev;
-                       tmp->idle_buddy = cpumask_first(sched_group_cpus(sg));
-               } while ((tmp = tmp->child));
-
+       if (sd)
                id = cpumask_first(sched_domain_span(sd));
-       }
 
        rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
        per_cpu(sd_llc_id, cpu) = id;
index c219bf8d704c5460291abee8416264ee32d36e22..96e2b18b628312dd69e8ad2ee6e3a6e08f33cce7 100644 (file)
@@ -2052,7 +2052,7 @@ static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
        hrtimer_cancel(&cfs_b->slack_timer);
 }
 
-void unthrottle_offline_cfs_rqs(struct rq *rq)
+static void unthrottle_offline_cfs_rqs(struct rq *rq)
 {
        struct cfs_rq *cfs_rq;
 
@@ -2106,7 +2106,7 @@ static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
        return NULL;
 }
 static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
-void unthrottle_offline_cfs_rqs(struct rq *rq) {}
+static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
 
 #endif /* CONFIG_CFS_BANDWIDTH */
 
@@ -2637,6 +2637,8 @@ static int select_idle_sibling(struct task_struct *p, int target)
        int cpu = smp_processor_id();
        int prev_cpu = task_cpu(p);
        struct sched_domain *sd;
+       struct sched_group *sg;
+       int i;
 
        /*
         * If the task is going to be woken-up on this cpu and if it is
@@ -2653,17 +2655,29 @@ static int select_idle_sibling(struct task_struct *p, int target)
                return prev_cpu;
 
        /*
-        * Otherwise, check assigned siblings to find an elegible idle cpu.
+        * Otherwise, iterate the domains and find an eligible idle cpu.
         */
        sd = rcu_dereference(per_cpu(sd_llc, target));
-
        for_each_lower_domain(sd) {
-               if (!cpumask_test_cpu(sd->idle_buddy, tsk_cpus_allowed(p)))
-                       continue;
-               if (idle_cpu(sd->idle_buddy))
-                       return sd->idle_buddy;
-       }
+               sg = sd->groups;
+               do {
+                       if (!cpumask_intersects(sched_group_cpus(sg),
+                                               tsk_cpus_allowed(p)))
+                               goto next;
 
+                       for_each_cpu(i, sched_group_cpus(sg)) {
+                               if (!idle_cpu(i))
+                                       goto next;
+                       }
+
+                       target = cpumask_first_and(sched_group_cpus(sg),
+                                       tsk_cpus_allowed(p));
+                       goto done;
+next:
+                       sg = sg->next;
+               } while (sg != sd->groups);
+       }
+done:
        return target;
 }
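
The rewritten scan walks the LLC domain's groups, skips any group that contains a busy CPU or no CPU the task is allowed on, and returns the first allowed CPU of the first fully idle group. A standalone array-based sketch of that scan; the contiguous-range group layout is invented, not the scheduler's sched_group list:

    #include <stdbool.h>
    #include <stdio.h>

    #define NCPUS 8

    static bool cpu_idle[NCPUS]    = { false, false, true, true, false, true, true, true };
    static bool cpu_allowed[NCPUS] = { true,  true,  true, true, true,  true, false, true };

    struct group { int first, last; };      /* contiguous CPU range */

    static int find_idle_cpu(const struct group *groups, int ngroups, int fallback)
    {
        for (int g = 0; g < ngroups; g++) {
            bool usable = false, all_idle = true;

            for (int c = groups[g].first; c <= groups[g].last; c++) {
                if (cpu_allowed[c])
                    usable = true;
                if (!cpu_idle[c])
                    all_idle = false;
            }
            if (!usable || !all_idle)
                continue;                   /* like the "goto next" above */

            for (int c = groups[g].first; c <= groups[g].last; c++)
                if (cpu_allowed[c])
                    return c;               /* first allowed CPU in the group */
        }
        return fallback;
    }

    int main(void)
    {
        struct group groups[] = { { 0, 1 }, { 2, 3 }, { 4, 5 }, { 6, 7 } };

        printf("target cpu = %d\n", find_idle_cpu(groups, 4, 0));   /* 2 */
        return 0;
    }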
 
@@ -3658,7 +3672,6 @@ fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
  * @group: sched_group whose statistics are to be updated.
  * @load_idx: Load index of sched_domain of this_cpu for load calc.
  * @local_group: Does group contain this_cpu.
- * @cpus: Set of cpus considered for load balancing.
  * @balance: Should we balance.
  * @sgs: variable to hold the statistics for this group.
  */
@@ -3805,7 +3818,6 @@ static bool update_sd_pick_busiest(struct lb_env *env,
 /**
  * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
  * @env: The load balancing environment.
- * @cpus: Set of cpus considered for load balancing.
  * @balance: Should we balance.
  * @sds: variable to hold the statistics for this sched_domain.
  */
@@ -4956,6 +4968,9 @@ static void rq_online_fair(struct rq *rq)
 static void rq_offline_fair(struct rq *rq)
 {
        update_sysctl();
+
+       /* Ensure any throttled groups are reachable by pick_next_task */
+       unthrottle_offline_cfs_rqs(rq);
 }
 
 #endif /* CONFIG_SMP */
index 944cb68420e957cbde71f9cacaaa4e81c4b1de20..e0b7ba9c040f74b22bb63e0d957b672dac4adce0 100644 (file)
@@ -691,6 +691,7 @@ balanced:
                 * runtime - in which case borrowing doesn't make sense.
                 */
                rt_rq->rt_runtime = RUNTIME_INF;
+               rt_rq->rt_throttled = 0;
                raw_spin_unlock(&rt_rq->rt_runtime_lock);
                raw_spin_unlock(&rt_b->rt_runtime_lock);
        }
index f6714d009e779a225ef295d33ceff7f2f8573be8..0848fa36c383e940a1e4245c611312dbdb7459d3 100644 (file)
@@ -1144,7 +1144,6 @@ extern void print_rt_stats(struct seq_file *m, int cpu);
 
 extern void init_cfs_rq(struct cfs_rq *cfs_rq);
 extern void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq);
-extern void unthrottle_offline_cfs_rqs(struct rq *rq);
 
 extern void account_cfs_bandwidth_used(int enabled, int was_enabled);
 
index 7e1ce012a851351c10853fe732e714afa2bb8a5c..30b6de0d977c4734eb479d7489f61e56b35afaa8 100644 (file)
@@ -397,6 +397,30 @@ void clockevents_exchange_device(struct clock_event_device *old,
        local_irq_restore(flags);
 }
 
+/**
+ * clockevents_suspend - suspend clock devices
+ */
+void clockevents_suspend(void)
+{
+       struct clock_event_device *dev;
+
+       list_for_each_entry_reverse(dev, &clockevent_devices, list)
+               if (dev->suspend)
+                       dev->suspend(dev);
+}
+
+/**
+ * clockevents_resume - resume clock devices
+ */
+void clockevents_resume(void)
+{
+       struct clock_event_device *dev;
+
+       list_for_each_entry(dev, &clockevent_devices, list)
+               if (dev->resume)
+                       dev->resume(dev);
+}
+
 #ifdef CONFIG_GENERIC_CLOCKEVENTS
 /**
  * clockevents_notify - notification about relevant events
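
The new clockevents_suspend()/clockevents_resume() helpers walk the registered clock event devices, suspending them in reverse registration order and resuming them in registration order, with both hooks optional per device. A standalone sketch of that walk over a plain array; the device names and types below are invented:

    #include <stdio.h>

    struct clkdev {
        const char *name;
        void (*suspend)(struct clkdev *);
        void (*resume)(struct clkdev *);
    };

    static void say_suspend(struct clkdev *d) { printf("suspend %s\n", d->name); }
    static void say_resume(struct clkdev *d)  { printf("resume  %s\n", d->name); }

    static struct clkdev devs[] = {
        { "timer0", say_suspend, say_resume },
        { "timer1", NULL,        say_resume },  /* no suspend hook */
        { "timer2", say_suspend, NULL },        /* no resume hook */
    };

    static const int ndevs = sizeof(devs) / sizeof(devs[0]);

    static void devices_suspend(void)
    {
        for (int i = ndevs - 1; i >= 0; i--)
            if (devs[i].suspend)
                devs[i].suspend(&devs[i]);
    }

    static void devices_resume(void)
    {
        for (int i = 0; i < ndevs; i++)
            if (devs[i].resume)
                devs[i].resume(&devs[i]);
    }

    int main(void)
    {
        devices_suspend();      /* timer2, then timer0 */
        devices_resume();       /* timer0, then timer1 */
        return 0;
    }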
index 024540f97f74c3e94205826f33d3968ea765f626..3a9e5d5c10916a7e67c131df489617a485a39bfc 100644 (file)
@@ -573,6 +573,7 @@ static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
        tick_do_update_jiffies64(now);
        update_cpu_load_nohz();
 
+       calc_load_exit_idle();
        touch_softlockup_watchdog();
        /*
         * Cancel the scheduled timer and restore the tick
index 34e5eac81424d98738246415a85d1a98a042bef1..312a675cb240e4d607bfb24081bcc17ed0308230 100644 (file)
@@ -773,6 +773,7 @@ static void timekeeping_resume(void)
 
        read_persistent_clock(&ts);
 
+       clockevents_resume();
        clocksource_resume();
 
        write_seqlock_irqsave(&tk->lock, flags);
@@ -832,6 +833,7 @@ static int timekeeping_suspend(void)
 
        clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
        clocksource_suspend();
+       clockevents_suspend();
 
        return 0;
 }
index 692d97628a106360683dfef46797952cdf1861e1..1e1373bcb3e3125f72baf77cf396689d18de9bdd 100644 (file)
@@ -66,6 +66,7 @@ enum {
 
        /* pool flags */
        POOL_MANAGE_WORKERS     = 1 << 0,       /* need to manage workers */
+       POOL_MANAGING_WORKERS   = 1 << 1,       /* managing workers */
 
        /* worker flags */
        WORKER_STARTED          = 1 << 0,       /* started */
@@ -652,7 +653,7 @@ static bool need_to_manage_workers(struct worker_pool *pool)
 /* Do we have too many workers and should some go away? */
 static bool too_many_workers(struct worker_pool *pool)
 {
-       bool managing = mutex_is_locked(&pool->manager_mutex);
+       bool managing = pool->flags & POOL_MANAGING_WORKERS;
        int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
        int nr_busy = pool->nr_workers - nr_idle;
 
@@ -1326,6 +1327,15 @@ static void idle_worker_rebind(struct worker *worker)
 
        /* we did our part, wait for rebind_workers() to finish up */
        wait_event(gcwq->rebind_hold, !(worker->flags & WORKER_REBIND));
+
+       /*
+        * rebind_workers() shouldn't finish until all workers passed the
+        * above WORKER_REBIND wait.  Tell it when done.
+        */
+       spin_lock_irq(&worker->pool->gcwq->lock);
+       if (!--worker->idle_rebind->cnt)
+               complete(&worker->idle_rebind->done);
+       spin_unlock_irq(&worker->pool->gcwq->lock);
 }
 
 /*
@@ -1396,12 +1406,15 @@ retry:
        /* set REBIND and kick idle ones, we'll wait for these later */
        for_each_worker_pool(pool, gcwq) {
                list_for_each_entry(worker, &pool->idle_list, entry) {
+                       unsigned long worker_flags = worker->flags;
+
                        if (worker->flags & WORKER_REBIND)
                                continue;
 
-                       /* morph UNBOUND to REBIND */
-                       worker->flags &= ~WORKER_UNBOUND;
-                       worker->flags |= WORKER_REBIND;
+                       /* morph UNBOUND to REBIND atomically */
+                       worker_flags &= ~WORKER_UNBOUND;
+                       worker_flags |= WORKER_REBIND;
+                       ACCESS_ONCE(worker->flags) = worker_flags;
 
                        idle_rebind.cnt++;
                        worker->idle_rebind = &idle_rebind;
@@ -1419,25 +1432,15 @@ retry:
                goto retry;
        }
 
-       /*
-        * All idle workers are rebound and waiting for %WORKER_REBIND to
-        * be cleared inside idle_worker_rebind().  Clear and release.
-        * Clearing %WORKER_REBIND from this foreign context is safe
-        * because these workers are still guaranteed to be idle.
-        */
-       for_each_worker_pool(pool, gcwq)
-               list_for_each_entry(worker, &pool->idle_list, entry)
-                       worker->flags &= ~WORKER_REBIND;
-
-       wake_up_all(&gcwq->rebind_hold);
-
-       /* rebind busy workers */
+       /* all idle workers are rebound, rebind busy workers */
        for_each_busy_worker(worker, i, pos, gcwq) {
                struct work_struct *rebind_work = &worker->rebind_work;
+               unsigned long worker_flags = worker->flags;
 
-               /* morph UNBOUND to REBIND */
-               worker->flags &= ~WORKER_UNBOUND;
-               worker->flags |= WORKER_REBIND;
+               /* morph UNBOUND to REBIND atomically */
+               worker_flags &= ~WORKER_UNBOUND;
+               worker_flags |= WORKER_REBIND;
+               ACCESS_ONCE(worker->flags) = worker_flags;
 
                if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
                                     work_data_bits(rebind_work)))
@@ -1449,6 +1452,34 @@ retry:
                            worker->scheduled.next,
                            work_color_to_flags(WORK_NO_COLOR));
        }
+
+       /*
+        * All idle workers are rebound and waiting for %WORKER_REBIND to
+        * be cleared inside idle_worker_rebind().  Clear and release.
+        * Clearing %WORKER_REBIND from this foreign context is safe
+        * because these workers are still guaranteed to be idle.
+        *
+        * We need to make sure all idle workers passed WORKER_REBIND wait
+        * in idle_worker_rebind() before returning; otherwise, workers can
+        * get stuck at the wait if hotplug cycle repeats.
+        */
+       idle_rebind.cnt = 1;
+       INIT_COMPLETION(idle_rebind.done);
+
+       for_each_worker_pool(pool, gcwq) {
+               list_for_each_entry(worker, &pool->idle_list, entry) {
+                       worker->flags &= ~WORKER_REBIND;
+                       idle_rebind.cnt++;
+               }
+       }
+
+       wake_up_all(&gcwq->rebind_hold);
+
+       if (--idle_rebind.cnt) {
+               spin_unlock_irq(&gcwq->lock);
+               wait_for_completion(&idle_rebind.done);
+               spin_lock_irq(&gcwq->lock);
+       }
 }
 
 static struct worker *alloc_worker(void)
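
The extra bookkeeping added to idle_worker_rebind() and rebind_workers() is a countdown: the manager primes idle_rebind.cnt with 1 for itself plus 1 per idle worker, each worker drops the count after passing its WORKER_REBIND wait and completes idle_rebind.done at zero, and the manager releases its own slot and sleeps on the completion if anything is still outstanding. A userspace countdown-latch sketch of the same scheme with pthreads; the names are invented and this is not the workqueue code:

    #include <pthread.h>
    #include <stdio.h>

    #define NWORKERS 4

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  done = PTHREAD_COND_INITIALIZER;
    static int cnt;

    static void latch_count_down(void)
    {
        pthread_mutex_lock(&lock);
        if (--cnt == 0)
            pthread_cond_broadcast(&done);
        pthread_mutex_unlock(&lock);
    }

    static void *worker(void *arg)
    {
        (void)arg;
        /* ... the worker's own synchronization step goes here ... */
        latch_count_down();
        return NULL;
    }

    int main(void)
    {
        pthread_t tids[NWORKERS];

        cnt = 1 + NWORKERS;             /* our slot plus one per worker */
        for (int i = 0; i < NWORKERS; i++)
            pthread_create(&tids[i], NULL, worker, NULL);

        pthread_mutex_lock(&lock);
        --cnt;                          /* release the coordinator's slot */
        while (cnt)
            pthread_cond_wait(&done, &lock);
        pthread_mutex_unlock(&lock);

        for (int i = 0; i < NWORKERS; i++)
            pthread_join(tids[i], NULL);
        puts("all workers passed the barrier");
        return 0;
    }

The kernel version increments the count one worker at a time under the gcwq lock instead of up front, but the invariant is the same: the waiter cannot return before every tagged worker has counted itself down.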
@@ -1794,9 +1825,45 @@ static bool manage_workers(struct worker *worker)
        struct worker_pool *pool = worker->pool;
        bool ret = false;
 
-       if (!mutex_trylock(&pool->manager_mutex))
+       if (pool->flags & POOL_MANAGING_WORKERS)
                return ret;
 
+       pool->flags |= POOL_MANAGING_WORKERS;
+
+       /*
+        * To simplify both worker management and CPU hotplug, hold off
+        * management while hotplug is in progress.  CPU hotplug path can't
+        * grab %POOL_MANAGING_WORKERS to achieve this because that can
+        * lead to idle worker depletion (all become busy thinking someone
+        * else is managing) which in turn can result in deadlock under
+        * extreme circumstances.  Use @pool->manager_mutex to synchronize
+        * manager against CPU hotplug.
+        *
+        * manager_mutex would always be free unless CPU hotplug is in
+        * progress.  trylock first without dropping @gcwq->lock.
+        */
+       if (unlikely(!mutex_trylock(&pool->manager_mutex))) {
+               spin_unlock_irq(&pool->gcwq->lock);
+               mutex_lock(&pool->manager_mutex);
+               /*
+                * CPU hotplug could have happened while we were waiting
+                * for manager_mutex.  Hotplug itself can't handle us
+                * because manager isn't either on idle or busy list, and
+                * @gcwq's state and ours could have deviated.
+                *
+                * As hotplug is now excluded via manager_mutex, we can
+                * simply try to bind.  It will succeed or fail depending
+                * on @gcwq's current state.  Try it and adjust
+                * %WORKER_UNBOUND accordingly.
+                */
+               if (worker_maybe_bind_and_lock(worker))
+                       worker->flags &= ~WORKER_UNBOUND;
+               else
+                       worker->flags |= WORKER_UNBOUND;
+
+               ret = true;
+       }
+
        pool->flags &= ~POOL_MANAGE_WORKERS;
 
        /*
@@ -1806,6 +1873,7 @@ static bool manage_workers(struct worker *worker)
        ret |= maybe_destroy_workers(pool);
        ret |= maybe_create_worker(pool);
 
+       pool->flags &= ~POOL_MANAGING_WORKERS;
        mutex_unlock(&pool->manager_mutex);
        return ret;
 }
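
manage_workers() now claims management with POOL_MANAGING_WORKERS under the gcwq spinlock and uses manager_mutex purely to exclude CPU hotplug: it trylocks the mutex without releasing the spinlock and, only on contention, drops the spinlock, blocks on the mutex, and re-validates its binding once it is back. A compact pthread sketch of that trylock-first, drop-and-block-on-contention shape; the locks and names are invented:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t inner = PTHREAD_MUTEX_INITIALIZER;  /* like gcwq->lock */
    static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER;  /* like manager_mutex */
    static int revalidated;

    static void manage(void)
    {
        pthread_mutex_lock(&inner);

        if (pthread_mutex_trylock(&outer) != 0) {
            /* Slow path: we must not block while holding the inner
             * lock, so drop it, sleep on the outer lock, retake the
             * inner lock, and re-check anything that may have changed. */
            pthread_mutex_unlock(&inner);
            pthread_mutex_lock(&outer);
            pthread_mutex_lock(&inner);
            revalidated = 1;
        }

        /* ... management work runs here with both locks held ... */

        pthread_mutex_unlock(&outer);
        pthread_mutex_unlock(&inner);
    }

    int main(void)
    {
        manage();
        printf("had to block and revalidate: %d\n", revalidated);
        return 0;
    }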
index 286d558033e270524ff3fdeac9c96393b81170a6..8c0e62975c88d49a09c9c29ab9e7a2b1334a6587 100644 (file)
@@ -163,9 +163,11 @@ static int digsig_verify_rsa(struct key *key,
        memcpy(out1 + head, p, l);
 
        err = pkcs_1_v1_5_decode_emsa(out1, len, mblen, out2, &len);
+       if (err)
+               goto err;
 
-       if (!err && len == hlen)
-               err = memcmp(out2, h, hlen);
+       if (len != hlen || memcmp(out2, h, hlen))
+               err = -EINVAL;
 
 err:
        mpi_free(in);
index 4d9393c7edc9072ff929175eec6e611788da124b..82aa349d2f7a040b489bee441bf848b61119f788 100644 (file)
@@ -246,7 +246,7 @@ static int __init_memblock memblock_double_array(struct memblock_type *type,
                                min(new_area_start, memblock.current_limit),
                                new_alloc_size, PAGE_SIZE);
 
-               new_array = addr ? __va(addr) : 0;
+               new_array = addr ? __va(addr) : NULL;
        }
        if (!addr) {
                pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
index bd92431d4c49a8e29f4b46d50d6d0e66c69098d5..4ada3be6e2521278de6da6e110995d63ccd7ed8f 100644 (file)
@@ -2562,7 +2562,7 @@ int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context)
                break;
 
        default:
-               BUG();
+               return -EINVAL;
        }
 
        l = strlen(policy_modes[mode]);
index 5ad7da21747413f50ba0106041c6e73d6ce727d4..3c094e78dde98cafed3ac893abd3b2fa86b76a92 100644 (file)
@@ -29,6 +29,7 @@
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 #include <net/bluetooth/a2mp.h>
+#include <net/bluetooth/smp.h>
 
 static void hci_le_connect(struct hci_conn *conn)
 {
@@ -619,6 +620,9 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
 {
        BT_DBG("hcon %p", conn);
 
+       if (conn->type == LE_LINK)
+               return smp_conn_security(conn, sec_level);
+
        /* For sdp we don't need the link key. */
        if (sec_level == BT_SECURITY_SDP)
                return 1;
index daa149b7003cdd659120e79225a50c6216a6dce4..4ea1710a478329a5d4219ce686a2ef4450ca30b8 100644 (file)
@@ -1199,14 +1199,15 @@ clean:
 static void l2cap_conn_ready(struct l2cap_conn *conn)
 {
        struct l2cap_chan *chan;
+       struct hci_conn *hcon = conn->hcon;
 
        BT_DBG("conn %p", conn);
 
-       if (!conn->hcon->out && conn->hcon->type == LE_LINK)
+       if (!hcon->out && hcon->type == LE_LINK)
                l2cap_le_conn_ready(conn);
 
-       if (conn->hcon->out && conn->hcon->type == LE_LINK)
-               smp_conn_security(conn, conn->hcon->pending_sec_level);
+       if (hcon->out && hcon->type == LE_LINK)
+               smp_conn_security(hcon, hcon->pending_sec_level);
 
        mutex_lock(&conn->chan_lock);
 
@@ -1219,8 +1220,8 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
                        continue;
                }
 
-               if (conn->hcon->type == LE_LINK) {
-                       if (smp_conn_security(conn, chan->sec_level))
+               if (hcon->type == LE_LINK) {
+                       if (smp_conn_security(hcon, chan->sec_level))
                                l2cap_chan_ready(chan);
 
                } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
index 1497edd191a2e04ee3121624db92547059f24369..34bbe1c5e389500f080e15b30c194e95ea36f189 100644 (file)
@@ -616,7 +616,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
                                break;
                        }
 
-                       if (smp_conn_security(conn, sec.level))
+                       if (smp_conn_security(conn->hcon, sec.level))
                                break;
                        sk->sk_state = BT_CONFIG;
                        chan->state = BT_CONFIG;
index 901a616c8083e22f5163f8bbd1613b1529c63519..8c225ef349cd733614dfeaca0f2f1bceccdae064 100644 (file)
@@ -267,10 +267,10 @@ static void smp_failure(struct l2cap_conn *conn, u8 reason, u8 send)
        mgmt_auth_failed(conn->hcon->hdev, conn->dst, hcon->type,
                         hcon->dst_type, reason);
 
-       if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
-               cancel_delayed_work_sync(&conn->security_timer);
+       cancel_delayed_work_sync(&conn->security_timer);
+
+       if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags))
                smp_chan_destroy(conn);
-       }
 }
 
 #define JUST_WORKS     0x00
@@ -760,9 +760,9 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
        return 0;
 }
 
-int smp_conn_security(struct l2cap_conn *conn, __u8 sec_level)
+int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
 {
-       struct hci_conn *hcon = conn->hcon;
+       struct l2cap_conn *conn = hcon->l2cap_data;
        struct smp_chan *smp = conn->smp_chan;
        __u8 authreq;
 
index f88ee537fb2b811347c109cfa19dbdd34c2923c0..92de5e5f9db211fb004a5b8f095c2bd215276682 100644 (file)
@@ -80,7 +80,7 @@ ebt_log_packet(u_int8_t pf, unsigned int hooknum,
        unsigned int bitmask;
 
        spin_lock_bh(&ebt_log_lock);
-       printk("<%c>%s IN=%s OUT=%s MAC source = %pM MAC dest = %pM proto = 0x%04x",
+       printk(KERN_SOH "%c%s IN=%s OUT=%s MAC source = %pM MAC dest = %pM proto = 0x%04x",
               '0' + loginfo->u.log.level, prefix,
               in ? in->name : "", out ? out->name : "",
               eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
index dd485f6128e81df5f8b1b454f57dac3b3871158a..ba217e90765e11024251feae5bbcb46450101ea7 100644 (file)
@@ -211,9 +211,10 @@ void caif_client_register_refcnt(struct cflayer *adapt_layer,
                                        void (*put)(struct cflayer *lyr))
 {
        struct cfsrvl *service;
-       service = container_of(adapt_layer->dn, struct cfsrvl, layer);
 
-       WARN_ON(adapt_layer == NULL || adapt_layer->dn == NULL);
+       if (WARN_ON(adapt_layer == NULL || adapt_layer->dn == NULL))
+               return;
+       service = container_of(adapt_layer->dn, struct cfsrvl, layer);
        service->hold = hold;
        service->put = put;
 }
index 83988362805ef1453efb37bfcb3692f59f4edd21..d7fe32c946c1a472b46f086929badaea6900d432 100644 (file)
@@ -2647,15 +2647,16 @@ void __skb_get_rxhash(struct sk_buff *skb)
        if (!skb_flow_dissect(skb, &keys))
                return;
 
-       if (keys.ports) {
-               if ((__force u16)keys.port16[1] < (__force u16)keys.port16[0])
-                       swap(keys.port16[0], keys.port16[1]);
+       if (keys.ports)
                skb->l4_rxhash = 1;
-       }
 
        /* get a consistent hash (same value on both flow directions) */
-       if ((__force u32)keys.dst < (__force u32)keys.src)
+       if (((__force u32)keys.dst < (__force u32)keys.src) ||
+           (((__force u32)keys.dst == (__force u32)keys.src) &&
+            ((__force u16)keys.port16[1] < (__force u16)keys.port16[0]))) {
                swap(keys.dst, keys.src);
+               swap(keys.port16[0], keys.port16[1]);
+       }
 
        hash = jhash_3words((__force u32)keys.dst,
                            (__force u32)keys.src,
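
The rxhash change makes the hash direction-independent by canonicalizing the address pair and the port pair together: both are swapped when dst < src, or when the addresses are equal and the destination port is the smaller one, so a flow and its reverse always feed the same values to the hash. A standalone sketch of hashing a canonicalized 4-tuple; mix3() is a stand-in mixer, not the kernel's jhash_3words():

    #include <stdint.h>
    #include <stdio.h>

    struct flow {
        uint32_t src, dst;
        uint16_t sport, dport;
    };

    static uint32_t mix3(uint32_t a, uint32_t b, uint32_t c)
    {
        /* any deterministic mixer will do for the illustration */
        a ^= b * 2654435761u;
        a ^= c * 2246822519u;
        a ^= a >> 16;
        return a * 2654435761u;
    }

    static uint32_t flow_hash(struct flow f)
    {
        if (f.dst < f.src ||
            (f.dst == f.src && f.dport < f.sport)) {
            uint32_t ta = f.src;
            uint16_t tp = f.sport;

            f.src = f.dst;     f.dst = ta;      /* swap addresses ... */
            f.sport = f.dport; f.dport = tp;    /* ... and ports together */
        }
        return mix3(f.dst, f.src, ((uint32_t)f.dport << 16) | f.sport);
    }

    int main(void)
    {
        struct flow ab = { 0x0a000001, 0x0a000002, 1234, 80 };
        struct flow ba = { 0x0a000002, 0x0a000001, 80, 1234 };

        /* both directions of the same flow print the same value */
        printf("%08x %08x\n", (unsigned)flow_hash(ab), (unsigned)flow_hash(ba));
        return 0;
    }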
index 346b1eb83a1f0336ed1e9dcf4f5905b733022dff..e4ba3e70c1747684ad480815f67b2410e87974a7 100644 (file)
@@ -168,24 +168,16 @@ static void poll_napi(struct net_device *dev)
        struct napi_struct *napi;
        int budget = 16;
 
-       WARN_ON_ONCE(!irqs_disabled());
-
        list_for_each_entry(napi, &dev->napi_list, dev_list) {
-               local_irq_enable();
                if (napi->poll_owner != smp_processor_id() &&
                    spin_trylock(&napi->poll_lock)) {
-                       rcu_read_lock_bh();
                        budget = poll_one_napi(rcu_dereference_bh(dev->npinfo),
                                               napi, budget);
-                       rcu_read_unlock_bh();
                        spin_unlock(&napi->poll_lock);
 
-                       if (!budget) {
-                               local_irq_disable();
+                       if (!budget)
                                break;
-                       }
                }
-               local_irq_disable();
        }
 }
 
index cce9e53528b169a67a8b5cd8bf0e568460587e24..148e73d2c4515d777d577733f32205c38d03932e 100644 (file)
@@ -2721,7 +2721,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
        /* Eth + IPh + UDPh + mpls */
        datalen = pkt_dev->cur_pkt_size - 14 - 20 - 8 -
                  pkt_dev->pkt_overhead;
-       if (datalen < sizeof(struct pktgen_hdr))
+       if (datalen < 0 || datalen < sizeof(struct pktgen_hdr))
                datalen = sizeof(struct pktgen_hdr);
 
        udph->source = htons(pkt_dev->cur_udp_src);
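
The added datalen < 0 test matters because sizeof() is unsigned: in the old comparison a negative int is converted to a large unsigned value, so the lower-bound clamp would never fire on its own. A short demonstration of that pitfall; struct hdr is a stand-in for struct pktgen_hdr:

    #include <stdio.h>

    struct hdr { int a, b, c, d; };     /* stand-in, 16 bytes on common ABIs */

    int main(void)
    {
        int datalen = -4;

        if (datalen < (int)sizeof(struct hdr))
            puts("signed compare: correctly seen as too small");

        if (datalen < sizeof(struct hdr))
            puts("never printed");
        else
            puts("unsigned compare: -4 is treated as a huge value");

        return 0;
    }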
index 8f67ced8d6a808689255435dd412df132138af65..30579207612175f0a65b19310368079c54ce4bc9 100644 (file)
@@ -1523,7 +1523,14 @@ EXPORT_SYMBOL(sock_rfree);
 
 void sock_edemux(struct sk_buff *skb)
 {
-       sock_put(skb->sk);
+       struct sock *sk = skb->sk;
+
+#ifdef CONFIG_INET
+       if (sk->sk_state == TCP_TIME_WAIT)
+               inet_twsk_put(inet_twsk(sk));
+       else
+#endif
+               sock_put(sk);
 }
 EXPORT_SYMBOL(sock_edemux);
 
index 8eec8f4a05360d24897719495100ea100f0c69b2..ebdf06f938bf040eebc91763c3e952c63d00f92b 100644 (file)
@@ -124,6 +124,8 @@ static DEFINE_SPINLOCK(mfc_unres_lock);
 static struct kmem_cache *mrt_cachep __read_mostly;
 
 static struct mr_table *ipmr_new_table(struct net *net, u32 id);
+static void ipmr_free_table(struct mr_table *mrt);
+
 static int ip_mr_forward(struct net *net, struct mr_table *mrt,
                         struct sk_buff *skb, struct mfc_cache *cache,
                         int local);
@@ -131,6 +133,7 @@ static int ipmr_cache_report(struct mr_table *mrt,
                             struct sk_buff *pkt, vifi_t vifi, int assert);
 static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
                              struct mfc_cache *c, struct rtmsg *rtm);
+static void mroute_clean_tables(struct mr_table *mrt);
 static void ipmr_expire_process(unsigned long arg);
 
 #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
@@ -271,7 +274,7 @@ static void __net_exit ipmr_rules_exit(struct net *net)
 
        list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
                list_del(&mrt->list);
-               kfree(mrt);
+               ipmr_free_table(mrt);
        }
        fib_rules_unregister(net->ipv4.mr_rules_ops);
 }
@@ -299,7 +302,7 @@ static int __net_init ipmr_rules_init(struct net *net)
 
 static void __net_exit ipmr_rules_exit(struct net *net)
 {
-       kfree(net->ipv4.mrt);
+       ipmr_free_table(net->ipv4.mrt);
 }
 #endif
 
@@ -336,6 +339,13 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
        return mrt;
 }
 
+static void ipmr_free_table(struct mr_table *mrt)
+{
+       del_timer_sync(&mrt->ipmr_expire_timer);
+       mroute_clean_tables(mrt);
+       kfree(mrt);
+}
+
 /* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */
 
 static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
index 4ad9cf1739922cfb4b13d3135d01381dd70bf0be..9c87cde28ff831472cc7072a05526d753727d1fa 100644 (file)
@@ -502,7 +502,10 @@ static unsigned int ip_nat_sdp_media(struct sk_buff *skb, unsigned int dataoff,
                ret = nf_ct_expect_related(rtcp_exp);
                if (ret == 0)
                        break;
-               else if (ret != -EBUSY) {
+               else if (ret == -EBUSY) {
+                       nf_ct_unexpect_related(rtp_exp);
+                       continue;
+               } else if (ret < 0) {
                        nf_ct_unexpect_related(rtp_exp);
                        port = 0;
                        break;
index fd9ecb52c66bf8815c09e678c452017e9a7e2122..82cf2a722b2310803ec603f60d59cdf819481a4a 100644 (file)
@@ -934,12 +934,14 @@ static u32 __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
        if (mtu < ip_rt_min_pmtu)
                mtu = ip_rt_min_pmtu;
 
+       rcu_read_lock();
        if (fib_lookup(dev_net(rt->dst.dev), fl4, &res) == 0) {
                struct fib_nh *nh = &FIB_RES_NH(res);
 
                update_or_create_fnhe(nh, fl4->daddr, 0, mtu,
                                      jiffies + ip_rt_mtu_expires);
        }
+       rcu_read_unlock();
        return mtu;
 }
 
@@ -956,7 +958,7 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
                dst->obsolete = DST_OBSOLETE_KILL;
        } else {
                rt->rt_pmtu = mtu;
-               dst_set_expires(&rt->dst, ip_rt_mtu_expires);
+               rt->dst.expires = max(1UL, jiffies + ip_rt_mtu_expires);
        }
 }
 
@@ -1263,7 +1265,7 @@ static void ipv4_dst_destroy(struct dst_entry *dst)
 {
        struct rtable *rt = (struct rtable *) dst;
 
-       if (dst->flags & DST_NOCACHE) {
+       if (!list_empty(&rt->rt_uncached)) {
                spin_lock_bh(&rt_uncached_lock);
                list_del(&rt->rt_uncached);
                spin_unlock_bh(&rt_uncached_lock);
index 85308b90df80a844119a8cc773783f78aab5de04..6e38c6c23caa69601216e193507460663c1926ec 100644 (file)
@@ -2926,13 +2926,14 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
  * tcp_xmit_retransmit_queue().
  */
 static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
-                                 int newly_acked_sacked, bool is_dupack,
+                                 int prior_sacked, bool is_dupack,
                                  int flag)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) &&
                                    (tcp_fackets_out(tp) > tp->reordering));
+       int newly_acked_sacked = 0;
        int fast_rexmit = 0;
 
        if (WARN_ON(!tp->packets_out && tp->sacked_out))
@@ -2992,6 +2993,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
                                tcp_add_reno_sack(sk);
                } else
                        do_lost = tcp_try_undo_partial(sk, pkts_acked);
+               newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked;
                break;
        case TCP_CA_Loss:
                if (flag & FLAG_DATA_ACKED)
@@ -3013,6 +3015,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
                        if (is_dupack)
                                tcp_add_reno_sack(sk);
                }
+               newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked;
 
                if (icsk->icsk_ca_state <= TCP_CA_Disorder)
                        tcp_try_undo_dsack(sk);
@@ -3590,7 +3593,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
        int prior_packets;
        int prior_sacked = tp->sacked_out;
        int pkts_acked = 0;
-       int newly_acked_sacked = 0;
        bool frto_cwnd = false;
 
        /* If the ack is older than previous acks
@@ -3666,8 +3668,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
        flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una);
 
        pkts_acked = prior_packets - tp->packets_out;
-       newly_acked_sacked = (prior_packets - prior_sacked) -
-                            (tp->packets_out - tp->sacked_out);
 
        if (tp->frto_counter)
                frto_cwnd = tcp_process_frto(sk, flag);
@@ -3681,7 +3681,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
                    tcp_may_raise_cwnd(sk, flag))
                        tcp_cong_avoid(sk, ack, prior_in_flight);
                is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
-               tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked,
+               tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
                                      is_dupack, flag);
        } else {
                if ((flag & FLAG_DATA_ACKED) && !frto_cwnd)
@@ -3698,7 +3698,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 no_queue:
        /* If data was DSACKed, see if we can undo a cwnd reduction. */
        if (flag & FLAG_DSACKING_ACK)
-               tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked,
+               tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
                                      is_dupack, flag);
        /* If this ack opens up a zero window, clear backoff.  It was
         * being used to time the probes, and is probably far higher than
@@ -3718,8 +3718,7 @@ old_ack:
         */
        if (TCP_SKB_CB(skb)->sacked) {
                flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una);
-               newly_acked_sacked = tp->sacked_out - prior_sacked;
-               tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked,
+               tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
                                      is_dupack, flag);
        }
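
With newly_acked_sacked now computed inside tcp_fastretrans_alert() as pkts_acked + tp->sacked_out - prior_sacked, it still measures how many segments left the not-yet-SACKed outstanding set. With hypothetical numbers: an ACK that shrinks packets_out from 10 to 7 gives pkts_acked = 3; if the SACK scoreboard also shrank from prior_sacked = 4 to sacked_out = 2 because the cumulative ACK covered previously SACKed data, then newly_acked_sacked = 3 + 2 - 4 = 1, the same result the removed (prior_packets - prior_sacked) - (packets_out - sacked_out) expression would have produced: (10 - 4) - (7 - 2) = 1.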
 
index 6f6d1aca3c3de0e21036c075c0a13c429ef4f8ae..2814f66dac64cf5775806138c91903c7a02eeae3 100644 (file)
@@ -1226,6 +1226,11 @@ try_again:
 
        if (unlikely(err)) {
                trace_kfree_skb(skb, udp_recvmsg);
+               if (!peeked) {
+                       atomic_inc(&sk->sk_drops);
+                       UDP_INC_STATS_USER(sock_net(sk),
+                                          UDP_MIB_INERRORS, is_udplite);
+               }
                goto out_free;
        }
 
index 6dc7fd353ef53f9c08c9202340fc953bf3b294c5..282f3723ee194704ab7fa0757e2c032254903300 100644 (file)
@@ -167,8 +167,6 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
        struct esp_data *esp = x->data;
 
        /* skb is pure payload to encrypt */
-       err = -ENOMEM;
-
        aead = esp->aead;
        alen = crypto_aead_authsize(aead);
 
@@ -203,8 +201,10 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
        }
 
        tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
-       if (!tmp)
+       if (!tmp) {
+               err = -ENOMEM;
                goto error;
+       }
 
        seqhi = esp_tmp_seqhi(tmp);
        iv = esp_tmp_iv(aead, tmp, seqhilen);
index a3e60cc04a8a17e229afb44f38643ed86ac0c63e..acd32e3f1b68e7c11fd211383e05b50dfd07ee6a 100644 (file)
@@ -403,8 +403,9 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                tp->mtu_info = ntohl(info);
                if (!sock_owned_by_user(sk))
                        tcp_v6_mtu_reduced(sk);
-               else
-                       set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags);
+               else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
+                                          &tp->tsq_flags))
+                       sock_hold(sk);
                goto out;
        }
 
index 99d0077b56b86f088a4a2fd2819a7b03e0eabd49..07e2bfef6845429ee7e359a6c21141db0a0219de 100644 (file)
@@ -394,6 +394,17 @@ try_again:
        }
        if (unlikely(err)) {
                trace_kfree_skb(skb, udpv6_recvmsg);
+               if (!peeked) {
+                       atomic_inc(&sk->sk_drops);
+                       if (is_udp4)
+                               UDP_INC_STATS_USER(sock_net(sk),
+                                                  UDP_MIB_INERRORS,
+                                                  is_udplite);
+                       else
+                               UDP6_INC_STATS_USER(sock_net(sk),
+                                                   UDP_MIB_INERRORS,
+                                                   is_udplite);
+               }
                goto out_free;
        }
        if (!peeked) {
index 393355d37b476bc67219d57221759c3a1bdb52e8..1a9f3723c13cb45b608bbc07fe4a9803df926523 100644 (file)
@@ -1347,11 +1347,10 @@ static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
        /* Remove from tunnel list */
        spin_lock_bh(&pn->l2tp_tunnel_list_lock);
        list_del_rcu(&tunnel->list);
+       kfree_rcu(tunnel, rcu);
        spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
-       synchronize_rcu();
 
        atomic_dec(&l2tp_tunnel_count);
-       kfree(tunnel);
 }
 
 /* Create a socket for the tunnel, if one isn't set up by
@@ -1502,6 +1501,8 @@ out:
        return err;
 }
 
+static struct lock_class_key l2tp_socket_class;
+
 int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp)
 {
        struct l2tp_tunnel *tunnel = NULL;
@@ -1606,6 +1607,8 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
        tunnel->old_sk_destruct = sk->sk_destruct;
        sk->sk_destruct = &l2tp_tunnel_destruct;
        tunnel->sock = sk;
+       lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class, "l2tp_sock");
+
        sk->sk_allocation = GFP_ATOMIC;
 
        /* Add tunnel to our list */
index a38ec6cdeee1a7ba81dc0ea9f659ca3f46bccb46..56d583e083a7baf7b0cf707a76882a88701d8025 100644 (file)
@@ -163,6 +163,7 @@ struct l2tp_tunnel_cfg {
 
 struct l2tp_tunnel {
        int                     magic;          /* Should be L2TP_TUNNEL_MAGIC */
+       struct rcu_head rcu;
        rwlock_t                hlist_lock;     /* protect session_hlist */
        struct hlist_head       session_hlist[L2TP_HASH_SIZE];
                                                /* hashed list of sessions,
index f9ee74deeac26f5469271a0800669cc82ffddc63..3bfb34aaee293cb697f36ae88a060f3329571214 100644 (file)
@@ -153,7 +153,7 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
                print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, skb->data, length);
        }
 
-       if (!pskb_may_pull(skb, sizeof(ETH_HLEN)))
+       if (!pskb_may_pull(skb, ETH_HLEN))
                goto error;
 
        secpath_reset(skb);
index d41974aacf5168597fd559f1c976252f9e36ffd9..a58c0b649ba137b09214c031bf3508b5fe2974eb 100644 (file)
@@ -1378,6 +1378,8 @@ static void mpath_set_pinfo(struct mesh_path *mpath, u8 *next_hop,
        else
                memset(next_hop, 0, ETH_ALEN);
 
+       memset(pinfo, 0, sizeof(*pinfo));
+
        pinfo->generation = mesh_paths_generation;
 
        pinfo->filled = MPATH_INFO_FRAME_QLEN |
@@ -1396,7 +1398,6 @@ static void mpath_set_pinfo(struct mesh_path *mpath, u8 *next_hop,
        pinfo->discovery_timeout =
                        jiffies_to_msecs(mpath->discovery_timeout);
        pinfo->discovery_retries = mpath->discovery_retries;
-       pinfo->flags = 0;
        if (mpath->flags & MESH_PATH_ACTIVE)
                pinfo->flags |= NL80211_MPATH_FLAG_ACTIVE;
        if (mpath->flags & MESH_PATH_RESOLVING)
@@ -1405,10 +1406,8 @@ static void mpath_set_pinfo(struct mesh_path *mpath, u8 *next_hop,
                pinfo->flags |= NL80211_MPATH_FLAG_SN_VALID;
        if (mpath->flags & MESH_PATH_FIXED)
                pinfo->flags |= NL80211_MPATH_FLAG_FIXED;
-       if (mpath->flags & MESH_PATH_RESOLVING)
-               pinfo->flags |= NL80211_MPATH_FLAG_RESOLVING;
-
-       pinfo->flags = mpath->flags;
+       if (mpath->flags & MESH_PATH_RESOLVED)
+               pinfo->flags |= NL80211_MPATH_FLAG_RESOLVED;
 }
 
 static int ieee80211_get_mpath(struct wiphy *wiphy, struct net_device *dev,
index a4a5acdbaa4dd3ac5e2fb8c5f0ff1d1b97553d6a..f76b83341cf9a39db0e14092a85f2e576245a306 100644 (file)
@@ -3248,6 +3248,8 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
        goto out_unlock;
 
  err_clear:
+       memset(ifmgd->bssid, 0, ETH_ALEN);
+       ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID);
        ifmgd->auth_data = NULL;
  err_free:
        kfree(auth_data);
@@ -3439,6 +3441,8 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
        err = 0;
        goto out;
  err_clear:
+       memset(ifmgd->bssid, 0, ETH_ALEN);
+       ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID);
        ifmgd->assoc_data = NULL;
  err_free:
        kfree(assoc_data);
index acf712ffb5e630b3c1fc74c6390c7606d4f043bd..c5e8c9c31f7687d9922d0011ea1b31e8244ea8d2 100644 (file)
@@ -1811,37 +1811,31 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
                        meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr,
                                        sdata, NULL, NULL);
                } else {
-                       int is_mesh_mcast = 1;
-                       const u8 *mesh_da;
+                       /* DS -> MBSS (802.11-2012 13.11.3.3).
+                        * For unicast with unknown forwarding information,
+                        * destination might be in the MBSS or if that fails
+                        * forwarded to another mesh gate. In either case
+                        * resolution will be handled in ieee80211_xmit(), so
+                        * leave the original DA. This also works for mcast */
+                       const u8 *mesh_da = skb->data;
+
+                       if (mppath)
+                               mesh_da = mppath->mpp;
+                       else if (mpath)
+                               mesh_da = mpath->dst;
+                       rcu_read_unlock();
 
-                       if (is_multicast_ether_addr(skb->data))
-                               /* DA TA mSA AE:SA */
-                               mesh_da = skb->data;
-                       else {
-                               static const u8 bcast[ETH_ALEN] =
-                                       { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
-                               if (mppath) {
-                                       /* RA TA mDA mSA AE:DA SA */
-                                       mesh_da = mppath->mpp;
-                                       is_mesh_mcast = 0;
-                               } else if (mpath) {
-                                       mesh_da = mpath->dst;
-                                       is_mesh_mcast = 0;
-                               } else {
-                                       /* DA TA mSA AE:SA */
-                                       mesh_da = bcast;
-                               }
-                       }
                        hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
                                        mesh_da, sdata->vif.addr);
-                       rcu_read_unlock();
-                       if (is_mesh_mcast)
+                       if (is_multicast_ether_addr(mesh_da))
+                               /* DA TA mSA AE:SA */
                                meshhdrlen =
                                        ieee80211_new_mesh_header(&mesh_hdr,
                                                        sdata,
                                                        skb->data + ETH_ALEN,
                                                        NULL);
                        else
+                               /* RA TA mDA mSA AE:DA SA */
                                meshhdrlen =
                                        ieee80211_new_mesh_header(&mesh_hdr,
                                                        sdata,
index 72bf32a84874718927a4bcbdc2e26395be00bdd8..f51013c07b9f4e5a81885f0462d5423ccbc8a44f 100644 (file)
@@ -1171,8 +1171,10 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
                goto out_err;
        }
        svc->stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats);
-       if (!svc->stats.cpustats)
+       if (!svc->stats.cpustats) {
+               ret = -ENOMEM;
                goto out_err;
+       }
 
        /* I'm the first user of the service */
        atomic_set(&svc->usecnt, 0);
index cf4875565d6755af8cb7e3ee952b723a007f1b4c..2ceec64b19f9866a222787531ba948b4c9e7e75b 100644 (file)
@@ -249,12 +249,15 @@ static void death_by_event(unsigned long ul_conntrack)
 {
        struct nf_conn *ct = (void *)ul_conntrack;
        struct net *net = nf_ct_net(ct);
+       struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct);
+
+       BUG_ON(ecache == NULL);
 
        if (nf_conntrack_event(IPCT_DESTROY, ct) < 0) {
                /* bad luck, let's retry again */
-               ct->timeout.expires = jiffies +
+               ecache->timeout.expires = jiffies +
                        (random32() % net->ct.sysctl_events_retry_timeout);
-               add_timer(&ct->timeout);
+               add_timer(&ecache->timeout);
                return;
        }
        /* we've got the event delivered, now it's dying */
@@ -268,6 +271,9 @@ static void death_by_event(unsigned long ul_conntrack)
 void nf_ct_insert_dying_list(struct nf_conn *ct)
 {
        struct net *net = nf_ct_net(ct);
+       struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct);
+
+       BUG_ON(ecache == NULL);
 
        /* add this conntrack to the dying list */
        spin_lock_bh(&nf_conntrack_lock);
@@ -275,10 +281,10 @@ void nf_ct_insert_dying_list(struct nf_conn *ct)
                             &net->ct.dying);
        spin_unlock_bh(&nf_conntrack_lock);
        /* set a new timer to retry event delivery */
-       setup_timer(&ct->timeout, death_by_event, (unsigned long)ct);
-       ct->timeout.expires = jiffies +
+       setup_timer(&ecache->timeout, death_by_event, (unsigned long)ct);
+       ecache->timeout.expires = jiffies +
                (random32() % net->ct.sysctl_events_retry_timeout);
-       add_timer(&ct->timeout);
+       add_timer(&ecache->timeout);
 }
 EXPORT_SYMBOL_GPL(nf_ct_insert_dying_list);
 
index da4fc37a8578b6ef4590cbd0abf6e04241ebb8fc..9807f3278fcbcdfcc28c61b9b19e6a8c74d02b18 100644 (file)
@@ -2790,7 +2790,8 @@ static int __init ctnetlink_init(void)
                goto err_unreg_subsys;
        }
 
-       if (register_pernet_subsys(&ctnetlink_net_ops)) {
+       ret = register_pernet_subsys(&ctnetlink_net_ops);
+       if (ret < 0) {
                pr_err("ctnetlink_init: cannot register pernet operations\n");
                goto err_unreg_exp_subsys;
        }
index a5ac11ebef331895f39af5e86ca4406e56562ceb..e046b3756aab755080d3edced132c459b7c8d4c4 100644 (file)
@@ -158,21 +158,18 @@ static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
  *     sCL -> sSS
  */
 /*          sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2   */
-/*synack*/ { sIV, sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG, sSR },
+/*synack*/ { sIV, sIV, sSR, sIV, sIV, sIV, sIV, sIV, sIV, sSR },
 /*
  *     sNO -> sIV      Too late and no reason to do anything
  *     sSS -> sIV      Client can't send SYN and then SYN/ACK
  *     sS2 -> sSR      SYN/ACK sent to SYN2 in simultaneous open
- *     sSR -> sIG
- *     sES -> sIG      Error: SYNs in window outside the SYN_SENT state
- *                     are errors. Receiver will reply with RST
- *                     and close the connection.
- *                     Or we are not in sync and hold a dead connection.
- *     sFW -> sIG
- *     sCW -> sIG
- *     sLA -> sIG
- *     sTW -> sIG
- *     sCL -> sIG
+ *     sSR -> sSR      Late retransmitted SYN/ACK in simultaneous open
+ *     sES -> sIV      Invalid SYN/ACK packets sent by the client
+ *     sFW -> sIV
+ *     sCW -> sIV
+ *     sLA -> sIV
+ *     sTW -> sIV
+ *     sCL -> sIV
  */
 /*          sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2   */
 /*fin*/    { sIV, sIV, sFW, sFW, sLA, sLA, sLA, sTW, sCL, sIV },
@@ -633,15 +630,9 @@ static bool tcp_in_window(const struct nf_conn *ct,
                ack = sack = receiver->td_end;
        }
 
-       if (seq == end
-           && (!tcph->rst
-               || (seq == 0 && state->state == TCP_CONNTRACK_SYN_SENT)))
+       if (tcph->rst && seq == 0 && state->state == TCP_CONNTRACK_SYN_SENT)
                /*
-                * Packets contains no data: we assume it is valid
-                * and check the ack value only.
-                * However RST segments are always validated by their
-                * SEQ number, except when seq == 0 (reset sent answering
-                * SYN.
+                * RST sent answering SYN.
                 */
                seq = end = sender->td_end;
 
index 169ab59ed9d49073105443bfcae75c89a2f30024..5cfb5bedb2b8e8f2fa44ed936a7cab265b5878e6 100644 (file)
@@ -381,6 +381,7 @@ __build_packet_message(struct nfulnl_instance *inst,
        struct nlmsghdr *nlh;
        struct nfgenmsg *nfmsg;
        sk_buff_data_t old_tail = inst->skb->tail;
+       struct sock *sk;
 
        nlh = nlmsg_put(inst->skb, 0, 0,
                        NFNL_SUBSYS_ULOG << 8 | NFULNL_MSG_PACKET,
@@ -480,7 +481,7 @@ __build_packet_message(struct nfulnl_instance *inst,
        }
 
        if (indev && skb_mac_header_was_set(skb)) {
-               if (nla_put_be32(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) ||
+               if (nla_put_be16(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) ||
                    nla_put_be16(inst->skb, NFULA_HWLEN,
                                 htons(skb->dev->hard_header_len)) ||
                    nla_put(inst->skb, NFULA_HWHEADER, skb->dev->hard_header_len,
@@ -499,18 +500,19 @@ __build_packet_message(struct nfulnl_instance *inst,
        }
 
        /* UID */
-       if (skb->sk) {
-               read_lock_bh(&skb->sk->sk_callback_lock);
-               if (skb->sk->sk_socket && skb->sk->sk_socket->file) {
-                       struct file *file = skb->sk->sk_socket->file;
+       sk = skb->sk;
+       if (sk && sk->sk_state != TCP_TIME_WAIT) {
+               read_lock_bh(&sk->sk_callback_lock);
+               if (sk->sk_socket && sk->sk_socket->file) {
+                       struct file *file = sk->sk_socket->file;
                        __be32 uid = htonl(file->f_cred->fsuid);
                        __be32 gid = htonl(file->f_cred->fsgid);
-                       read_unlock_bh(&skb->sk->sk_callback_lock);
+                       read_unlock_bh(&sk->sk_callback_lock);
                        if (nla_put_be32(inst->skb, NFULA_UID, uid) ||
                            nla_put_be32(inst->skb, NFULA_GID, gid))
                                goto nla_put_failure;
                } else
-                       read_unlock_bh(&skb->sk->sk_callback_lock);
+                       read_unlock_bh(&sk->sk_callback_lock);
        }
 
        /* local sequence number */
@@ -996,8 +998,10 @@ static int __init nfnetlink_log_init(void)
 
 #ifdef CONFIG_PROC_FS
        if (!proc_create("nfnetlink_log", 0440,
-                        proc_net_netfilter, &nful_file_ops))
+                        proc_net_netfilter, &nful_file_ops)) {
+               status = -ENOMEM;
                goto cleanup_logger;
+       }
 #endif
        return status;
 
index ff5f75fddb15175c408a1e0a1cf8a656453aff80..91e9af4d1f42c3baef9af1261c9464c70cd1bac0 100644 (file)
@@ -145,6 +145,19 @@ static int dump_tcp_header(struct sbuff *m, const struct sk_buff *skb,
        return 0;
 }
 
+static void dump_sk_uid_gid(struct sbuff *m, struct sock *sk)
+{
+       if (!sk || sk->sk_state == TCP_TIME_WAIT)
+               return;
+
+       read_lock_bh(&sk->sk_callback_lock);
+       if (sk->sk_socket && sk->sk_socket->file)
+               sb_add(m, "UID=%u GID=%u ",
+                       sk->sk_socket->file->f_cred->fsuid,
+                       sk->sk_socket->file->f_cred->fsgid);
+       read_unlock_bh(&sk->sk_callback_lock);
+}
+
 /* One level of recursion won't kill us */
 static void dump_ipv4_packet(struct sbuff *m,
                        const struct nf_loginfo *info,
@@ -361,14 +374,8 @@ static void dump_ipv4_packet(struct sbuff *m,
        }
 
        /* Max length: 15 "UID=4294967295 " */
-       if ((logflags & XT_LOG_UID) && !iphoff && skb->sk) {
-               read_lock_bh(&skb->sk->sk_callback_lock);
-               if (skb->sk->sk_socket && skb->sk->sk_socket->file)
-                       sb_add(m, "UID=%u GID=%u ",
-                               skb->sk->sk_socket->file->f_cred->fsuid,
-                               skb->sk->sk_socket->file->f_cred->fsgid);
-               read_unlock_bh(&skb->sk->sk_callback_lock);
-       }
+       if ((logflags & XT_LOG_UID) && !iphoff)
+               dump_sk_uid_gid(m, skb->sk);
 
        /* Max length: 16 "MARK=0xFFFFFFFF " */
        if (!iphoff && skb->mark)
@@ -436,8 +443,8 @@ log_packet_common(struct sbuff *m,
                  const struct nf_loginfo *loginfo,
                  const char *prefix)
 {
-       sb_add(m, "<%d>%sIN=%s OUT=%s ", loginfo->u.log.level,
-              prefix,
+       sb_add(m, KERN_SOH "%c%sIN=%s OUT=%s ",
+              '0' + loginfo->u.log.level, prefix,
               in ? in->name : "",
               out ? out->name : "");
 #ifdef CONFIG_BRIDGE_NETFILTER
@@ -717,14 +724,8 @@ static void dump_ipv6_packet(struct sbuff *m,
        }
 
        /* Max length: 15 "UID=4294967295 " */
-       if ((logflags & XT_LOG_UID) && recurse && skb->sk) {
-               read_lock_bh(&skb->sk->sk_callback_lock);
-               if (skb->sk->sk_socket && skb->sk->sk_socket->file)
-                       sb_add(m, "UID=%u GID=%u ",
-                               skb->sk->sk_socket->file->f_cred->fsuid,
-                               skb->sk->sk_socket->file->f_cred->fsgid);
-               read_unlock_bh(&skb->sk->sk_callback_lock);
-       }
+       if ((logflags & XT_LOG_UID) && recurse)
+               dump_sk_uid_gid(m, skb->sk);
 
        /* Max length: 16 "MARK=0xFFFFFFFF " */
        if (!recurse && skb->mark)
index 1445d73533ed13ac9aa43dd85d6f58b4a006c130..527023823b5c5ea1a48c373b49e9f1688891a494 100644 (file)
@@ -1373,7 +1373,8 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
                dst_pid = addr->nl_pid;
                dst_group = ffs(addr->nl_groups);
                err =  -EPERM;
-               if (dst_group && !netlink_capable(sock, NL_NONROOT_SEND))
+               if ((dst_group || dst_pid) &&
+                   !netlink_capable(sock, NL_NONROOT_SEND))
                        goto out;
        } else {
                dst_pid = nlk->dst_pid;
@@ -2147,6 +2148,7 @@ static void __init netlink_add_usersock_entry(void)
        rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
        nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
        nl_table[NETLINK_USERSOCK].registered = 1;
+       nl_table[NETLINK_USERSOCK].nl_nonroot = NL_NONROOT_SEND;
 
        netlink_table_ungrab();
 }
index 06592d8b4a2b4eba33d9e71e44fc75ca206b38a8..1b9024ee963c64f33b9240a0726dd67b83711567 100644 (file)
@@ -1169,7 +1169,12 @@ static int nr_recvmsg(struct kiocb *iocb, struct socket *sock,
                msg->msg_flags |= MSG_TRUNC;
        }
 
-       skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+       er = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+       if (er < 0) {
+               skb_free_datagram(sk, skb);
+               release_sock(sk);
+               return er;
+       }
 
        if (sax != NULL) {
                sax->sax25_family = AF_NETROM;
index f3f96badf5aac0202a2bd54155d595b0373a18df..954405ceae9ed5141293d3f47ce7c784aeee3c31 100644 (file)
@@ -45,7 +45,7 @@ static int make_writable(struct sk_buff *skb, int write_len)
        return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
 }
 
-/* remove VLAN header from packet and update csum accrodingly. */
+/* remove VLAN header from packet and update csum accordingly. */
 static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci)
 {
        struct vlan_hdr *vhdr;
index d8277d29e7102caf343d78b802f3371283c40de1..cf58cedad0833f9e9e704401fdecb5480c121caf 100644 (file)
@@ -425,10 +425,10 @@ static int validate_sample(const struct nlattr *attr,
 static int validate_tp_port(const struct sw_flow_key *flow_key)
 {
        if (flow_key->eth.type == htons(ETH_P_IP)) {
-               if (flow_key->ipv4.tp.src && flow_key->ipv4.tp.dst)
+               if (flow_key->ipv4.tp.src || flow_key->ipv4.tp.dst)
                        return 0;
        } else if (flow_key->eth.type == htons(ETH_P_IPV6)) {
-               if (flow_key->ipv6.tp.src && flow_key->ipv6.tp.dst)
+               if (flow_key->ipv6.tp.src || flow_key->ipv6.tp.dst)
                        return 0;
        }
 
@@ -460,7 +460,7 @@ static int validate_set(const struct nlattr *a,
                if (flow_key->eth.type != htons(ETH_P_IP))
                        return -EINVAL;
 
-               if (!flow_key->ipv4.addr.src || !flow_key->ipv4.addr.dst)
+               if (!flow_key->ip.proto)
                        return -EINVAL;
 
                ipv4_key = nla_data(ovs_key);
index 9b75617ca4e031db60ddbeb609bb658af7f8de0c..c30df1a10c670ad01b7b8b88c49434c95c0c659e 100644 (file)
@@ -145,15 +145,17 @@ u64 ovs_flow_used_time(unsigned long flow_jiffies);
  *  OVS_KEY_ATTR_PRIORITY      4    --     4      8
  *  OVS_KEY_ATTR_IN_PORT       4    --     4      8
  *  OVS_KEY_ATTR_ETHERNET     12    --     4     16
+ *  OVS_KEY_ATTR_ETHERTYPE     2     2     4      8  (outer VLAN ethertype)
  *  OVS_KEY_ATTR_8021Q         4    --     4      8
- *  OVS_KEY_ATTR_ETHERTYPE     2     2     4      8
+ *  OVS_KEY_ATTR_ENCAP         0    --     4      4  (VLAN encapsulation)
+ *  OVS_KEY_ATTR_ETHERTYPE     2     2     4      8  (inner VLAN ethertype)
  *  OVS_KEY_ATTR_IPV6         40    --     4     44
  *  OVS_KEY_ATTR_ICMPV6        2     2     4      8
  *  OVS_KEY_ATTR_ND           28    --     4     32
  *  -------------------------------------------------
- *  total                                       132
+ *  total                                       144
  */
-#define FLOW_BUFSIZE 132
+#define FLOW_BUFSIZE 144
 
 int ovs_flow_to_nlattrs(const struct sw_flow_key *, struct sk_buff *);
 int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
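
The table above now accounts for both the outer and inner VLAN ethertypes plus the empty OVS_KEY_ATTR_ENCAP container, which is where the new FLOW_BUFSIZE of 144 comes from. The standalone sketch below (plain userspace C, not kernel code) redoes that arithmetic, assuming the usual netlink layout of a 4-byte attribute header padded, together with its payload, to a 4-byte boundary.

    #include <stdio.h>

    #define ALIGN4(len)   (((len) + 3) & ~3)
    #define NLA_HDRLEN    4                        /* assumed aligned nlattr header size */
    #define ATTR_SIZE(p)  ALIGN4(NLA_HDRLEN + (p))

    int main(void)
    {
            /* payload sizes in the same order as the table above */
            int payloads[] = { 4, 4, 12, 2, 4, 0, 2, 40, 2, 28 };
            int i, total = 0;

            for (i = 0; i < (int)(sizeof(payloads) / sizeof(payloads[0])); i++)
                    total += ATTR_SIZE(payloads[i]);

            printf("FLOW_BUFSIZE = %d\n", total);  /* prints 144 */
            return 0;
    }
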
index aee7196aac36c990eedfc7b494a04351c29bb167..c5c9e2a54218207f0dba9b16920b2e17da84c353 100644 (file)
@@ -1273,7 +1273,7 @@ static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
        spin_unlock(&f->lock);
 }
 
-bool match_fanout_group(struct packet_type *ptype, struct sock * sk)
+static bool match_fanout_group(struct packet_type *ptype, struct sock * sk)
 {
        if (ptype->af_packet_priv == (void*)((struct packet_sock *)sk)->fanout)
                return true;
index 6aabd77d1cfdd5cddd34b55dc69699cae9956e57..564b9fc8efd3c8778ef8ba155cf17f79d92a9a80 100644 (file)
@@ -250,10 +250,11 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
                        else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL)
                                cl = defmap[TC_PRIO_BESTEFFORT];
 
-                       if (cl == NULL || cl->level >= head->level)
+                       if (cl == NULL)
                                goto fallback;
                }
-
+               if (cl->level >= head->level)
+                       goto fallback;
 #ifdef CONFIG_NET_CLS_ACT
                switch (result) {
                case TC_ACT_QUEUED:
index 9fc1c62ec80e1ad56b760b2820c0c4f2e495d4a0..4e606fcb2534929a2470e807816ac1089d81b7b2 100644 (file)
@@ -191,7 +191,6 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
        if (list_empty(&flow->flowchain)) {
                list_add_tail(&flow->flowchain, &q->new_flows);
-               codel_vars_init(&flow->cvars);
                q->new_flow_count++;
                flow->deficit = q->quantum;
                flow->dropped = 0;
@@ -418,6 +417,7 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
                        struct fq_codel_flow *flow = q->flows + i;
 
                        INIT_LIST_HEAD(&flow->flowchain);
+                       codel_vars_init(&flow->cvars);
                }
        }
        if (sch->limit >= 1)
index e901583e4ea533581f13e0fb39599f72e57b3e4c..d42234c0f13bf4d4829930e0f6bcb30681ad4782 100644 (file)
@@ -102,9 +102,8 @@ static inline int gred_wred_mode_check(struct Qdisc *sch)
                if (q == NULL)
                        continue;
 
-               for (n = 0; n < table->DPs; n++)
-                       if (table->tab[n] && table->tab[n] != q &&
-                           table->tab[n]->prio == q->prio)
+               for (n = i + 1; n < table->DPs; n++)
+                       if (table->tab[n] && table->tab[n]->prio == q->prio)
                                return 1;
        }
 
@@ -137,6 +136,7 @@ static inline void gred_store_wred_set(struct gred_sched *table,
                                       struct gred_sched_data *q)
 {
        table->wred_set.qavg = q->vars.qavg;
+       table->wred_set.qidlestart = q->vars.qidlestart;
 }
 
 static inline int gred_use_ecn(struct gred_sched *t)
@@ -176,7 +176,7 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                skb->tc_index = (skb->tc_index & ~GRED_VQ_MASK) | dp;
        }
 
-       /* sum up all the qaves of prios <= to ours to get the new qave */
+       /* sum up all the qaves of prios < ours to get the new qave */
        if (!gred_wred_mode(t) && gred_rio_mode(t)) {
                int i;
 
@@ -260,16 +260,18 @@ static struct sk_buff *gred_dequeue(struct Qdisc *sch)
                } else {
                        q->backlog -= qdisc_pkt_len(skb);
 
-                       if (!q->backlog && !gred_wred_mode(t))
-                               red_start_of_idle_period(&q->vars);
+                       if (gred_wred_mode(t)) {
+                               if (!sch->qstats.backlog)
+                                       red_start_of_idle_period(&t->wred_set);
+                       } else {
+                               if (!q->backlog)
+                                       red_start_of_idle_period(&q->vars);
+                       }
                }
 
                return skb;
        }
 
-       if (gred_wred_mode(t) && !red_is_idling(&t->wred_set))
-               red_start_of_idle_period(&t->wred_set);
-
        return NULL;
 }
 
@@ -291,19 +293,20 @@ static unsigned int gred_drop(struct Qdisc *sch)
                        q->backlog -= len;
                        q->stats.other++;
 
-                       if (!q->backlog && !gred_wred_mode(t))
-                               red_start_of_idle_period(&q->vars);
+                       if (gred_wred_mode(t)) {
+                               if (!sch->qstats.backlog)
+                                       red_start_of_idle_period(&t->wred_set);
+                       } else {
+                               if (!q->backlog)
+                                       red_start_of_idle_period(&q->vars);
+                       }
                }
 
                qdisc_drop(skb, sch);
                return len;
        }
 
-       if (gred_wred_mode(t) && !red_is_idling(&t->wred_set))
-               red_start_of_idle_period(&t->wred_set);
-
        return 0;
-
 }
 
 static void gred_reset(struct Qdisc *sch)
@@ -535,6 +538,7 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
        for (i = 0; i < MAX_DPs; i++) {
                struct gred_sched_data *q = table->tab[i];
                struct tc_gred_qopt opt;
+               unsigned long qavg;
 
                memset(&opt, 0, sizeof(opt));
 
@@ -566,7 +570,9 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
                if (gred_wred_mode(table))
                        gred_load_wred_set(table, q);
 
-               opt.qave = red_calc_qavg(&q->parms, &q->vars, q->vars.qavg);
+               qavg = red_calc_qavg(&q->parms, &q->vars,
+                                    q->vars.qavg >> q->parms.Wlog);
+               opt.qave = qavg >> q->parms.Wlog;
 
 append_opt:
                if (nla_append(skb, sizeof(opt), &opt) < 0)
index 838e18b4d7ea62cfd4d57e9d64a86630a0100f67..be50aa234dcdea30a5c7986eaeaae3570f64a6e3 100644 (file)
@@ -364,6 +364,25 @@ finish:
        return retval;
 }
 
+static void sctp_packet_release_owner(struct sk_buff *skb)
+{
+       sk_free(skb->sk);
+}
+
+static void sctp_packet_set_owner_w(struct sk_buff *skb, struct sock *sk)
+{
+       skb_orphan(skb);
+       skb->sk = sk;
+       skb->destructor = sctp_packet_release_owner;
+
+       /*
+        * The data chunks have already been accounted for in sctp_sendmsg(),
+        * therefore only reserve a single byte to keep the socket around
+        * until the packet has been transmitted.
+        */
+       atomic_inc(&sk->sk_wmem_alloc);
+}
+
 /* All packets are sent to the network through this function from
  * sctp_outq_tail().
  *
@@ -405,7 +424,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)
        /* Set the owning socket so that we know where to get the
         * destination IP address.
         */
-       skb_set_owner_w(nskb, sk);
+       sctp_packet_set_owner_w(nskb, sk);
 
        if (!sctp_transport_dst_check(tp)) {
                sctp_transport_route(tp, NULL, sctp_sk(sk));
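
sctp_packet_transmit() now uses its own sctp_packet_set_owner_w(): the skb pins the socket via a one-byte sk_wmem_alloc charge that the destructor drops, instead of the full skb_set_owner_w() write-space accounting, since the data chunks were already charged in sctp_sendmsg(). Below is a minimal userspace sketch of that "pin the owner until the destructor runs" pattern; owner/packet and their helpers are illustrative stand-ins, not kernel structures.

    #include <stdio.h>
    #include <stdlib.h>

    struct owner  { int refs; };
    struct packet {
            struct owner *owner;
            void (*destructor)(struct packet *pkt);
    };

    static void owner_put(struct owner *o)
    {
            if (--o->refs == 0) {
                    printf("owner freed\n");
                    free(o);
            }
    }

    /* destructor runs when the packet is done: drop the single charge */
    static void packet_release_owner(struct packet *pkt)
    {
            owner_put(pkt->owner);
    }

    /* take one reference so the owner outlives the in-flight packet */
    static void packet_set_owner(struct packet *pkt, struct owner *o)
    {
            pkt->owner = o;
            pkt->destructor = packet_release_owner;
            o->refs++;        /* stands in for atomic_inc(&sk->sk_wmem_alloc) */
    }

    int main(void)
    {
            struct owner *o = malloc(sizeof(*o));
            struct packet pkt;

            if (!o)
                    return 1;
            o->refs = 1;              /* the caller's own reference */
            packet_set_owner(&pkt, o);
            pkt.destructor(&pkt);     /* "transmission completed" */
            owner_put(o);             /* drop the caller's reference */
            return 0;
    }
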
index a5471f804d994ece8c31df6be0e04d5076b6dfd0..edc3c4af9085362c7227e31babfc19a489bf9cf6 100644 (file)
@@ -2604,7 +2604,7 @@ static int do_siocgstamp(struct net *net, struct socket *sock,
        err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv);
        set_fs(old_fs);
        if (!err)
-               err = compat_put_timeval(up, &ktv);
+               err = compat_put_timeval(&ktv, up);
 
        return err;
 }
@@ -2620,7 +2620,7 @@ static int do_siocgstampns(struct net *net, struct socket *sock,
        err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts);
        set_fs(old_fs);
        if (!err)
-               err = compat_put_timespec(up, &kts);
+               err = compat_put_timespec(&kts, up);
 
        return err;
 }
index a5a402a7d21f9e888b1c3f45b3bed09e8baf57e8..5d7f61d7559c9753c9bff0f29b371b5e62b5f0d9 100644 (file)
@@ -969,11 +969,11 @@ static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
        return false;
 }
 
-static void xprt_alloc_slot(struct rpc_task *task)
+void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
 {
-       struct rpc_xprt *xprt = task->tk_xprt;
        struct rpc_rqst *req;
 
+       spin_lock(&xprt->reserve_lock);
        if (!list_empty(&xprt->free)) {
                req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
                list_del(&req->rq_list);
@@ -994,12 +994,29 @@ static void xprt_alloc_slot(struct rpc_task *task)
        default:
                task->tk_status = -EAGAIN;
        }
+       spin_unlock(&xprt->reserve_lock);
        return;
 out_init_req:
        task->tk_status = 0;
        task->tk_rqstp = req;
        xprt_request_init(task, xprt);
+       spin_unlock(&xprt->reserve_lock);
+}
+EXPORT_SYMBOL_GPL(xprt_alloc_slot);
+
+void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
+{
+       /* Note: grabbing the xprt_lock_write() ensures that we throttle
+        * new slot allocation if the transport is congested (i.e. when
+        * reconnecting a stream transport or when out of socket write
+        * buffer space).
+        */
+       if (xprt_lock_write(xprt, task)) {
+               xprt_alloc_slot(xprt, task);
+               xprt_release_write(xprt, task);
+       }
 }
+EXPORT_SYMBOL_GPL(xprt_lock_and_alloc_slot);
 
 static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
 {
@@ -1083,20 +1100,9 @@ void xprt_reserve(struct rpc_task *task)
        if (task->tk_rqstp != NULL)
                return;
 
-       /* Note: grabbing the xprt_lock_write() here is not strictly needed,
-        * but ensures that we throttle new slot allocation if the transport
-        * is congested (e.g. if reconnecting or if we're out of socket
-        * write buffer space).
-        */
        task->tk_timeout = 0;
        task->tk_status = -EAGAIN;
-       if (!xprt_lock_write(xprt, task))
-               return;
-
-       spin_lock(&xprt->reserve_lock);
-       xprt_alloc_slot(task);
-       spin_unlock(&xprt->reserve_lock);
-       xprt_release_write(xprt, task);
+       xprt->ops->alloc_slot(xprt, task);
 }
 
 static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
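
The xprt.c change above turns slot allocation into a per-transport operation: xprt_alloc_slot() now takes the reserve_lock itself, xprt_lock_and_alloc_slot() wraps it with xprt_lock_write() so congested stream transports keep throttling new requests, and xprt_reserve() simply calls xprt->ops->alloc_slot(). A compact userspace sketch of that ops-table shape follows; the struct names, the free-slot counter and the boolean "write lock" are illustrative assumptions, not the SUNRPC API.

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct xprt;
    struct task { int status; };

    struct xprt_ops {
            void (*alloc_slot)(struct xprt *xprt, struct task *task);
    };

    struct xprt {
            const struct xprt_ops *ops;
            int free_slots;
            bool write_locked;        /* stand-in for xprt_lock_write() */
    };

    static void plain_alloc_slot(struct xprt *xprt, struct task *task)
    {
            if (xprt->free_slots > 0) {
                    xprt->free_slots--;
                    task->status = 0;
            } else {
                    task->status = -EAGAIN;
            }
    }

    static void lock_and_alloc_slot(struct xprt *xprt, struct task *task)
    {
            /* throttle new slots while the transport is reconnecting or
             * out of write buffer space */
            if (xprt->write_locked) {
                    task->status = -EAGAIN;
                    return;
            }
            xprt->write_locked = true;
            plain_alloc_slot(xprt, task);
            xprt->write_locked = false;
    }

    int main(void)
    {
            static const struct xprt_ops udp_ops = { .alloc_slot = plain_alloc_slot };
            static const struct xprt_ops tcp_ops = { .alloc_slot = lock_and_alloc_slot };
            struct xprt udp = { &udp_ops, 2, false };
            struct xprt tcp = { &tcp_ops, 2, false };
            struct task t = { 0 };

            udp.ops->alloc_slot(&udp, &t);
            printf("udp slot: %d\n", t.status);
            tcp.ops->alloc_slot(&tcp, &t);
            printf("tcp slot: %d\n", t.status);
            return 0;
    }
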
index 06cdbff79e4af433d5a4ea3a2766d6d09a9b5a01..5d9202dc7cb127f5158a2a6e275fa0656c18f0a6 100644 (file)
@@ -713,6 +713,7 @@ static void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
 static struct rpc_xprt_ops xprt_rdma_procs = {
        .reserve_xprt           = xprt_rdma_reserve_xprt,
        .release_xprt           = xprt_release_xprt_cong, /* sunrpc/xprt.c */
+       .alloc_slot             = xprt_alloc_slot,
        .release_request        = xprt_release_rqst_cong,       /* ditto */
        .set_retrans_timeout    = xprt_set_retrans_timeout_def, /* ditto */
        .rpcbind                = rpcb_getport_async,   /* sunrpc/rpcb_clnt.c */
index 400567243f84ba95e8f04d8a631a3b74539c53ea..a35b8e52e551d4b931110a78727604c8d6af8283 100644 (file)
@@ -2473,6 +2473,7 @@ static void bc_destroy(struct rpc_xprt *xprt)
 static struct rpc_xprt_ops xs_local_ops = {
        .reserve_xprt           = xprt_reserve_xprt,
        .release_xprt           = xs_tcp_release_xprt,
+       .alloc_slot             = xprt_alloc_slot,
        .rpcbind                = xs_local_rpcbind,
        .set_port               = xs_local_set_port,
        .connect                = xs_connect,
@@ -2489,6 +2490,7 @@ static struct rpc_xprt_ops xs_udp_ops = {
        .set_buffer_size        = xs_udp_set_buffer_size,
        .reserve_xprt           = xprt_reserve_xprt_cong,
        .release_xprt           = xprt_release_xprt_cong,
+       .alloc_slot             = xprt_alloc_slot,
        .rpcbind                = rpcb_getport_async,
        .set_port               = xs_set_port,
        .connect                = xs_connect,
@@ -2506,6 +2508,7 @@ static struct rpc_xprt_ops xs_udp_ops = {
 static struct rpc_xprt_ops xs_tcp_ops = {
        .reserve_xprt           = xprt_reserve_xprt,
        .release_xprt           = xs_tcp_release_xprt,
+       .alloc_slot             = xprt_lock_and_alloc_slot,
        .rpcbind                = rpcb_getport_async,
        .set_port               = xs_set_port,
        .connect                = xs_connect,
index 97026f3b215a1c85b3ddf0f0f6cf71636e76a586..1e37dbf00cb3f3850d3785827f896ca09339873b 100644 (file)
@@ -5633,8 +5633,10 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
                       sizeof(connect.ht_capa_mask));
 
        if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) {
-               if (!info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK])
+               if (!info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]) {
+                       kfree(connkeys);
                        return -EINVAL;
+               }
                memcpy(&connect.ht_capa,
                       nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]),
                       sizeof(connect.ht_capa));
index 54a0dc2e2f8d45d7a842be98882969f696c07ec2..ab2bb42fe094b7390d5135ec6e37b9113ea8219b 100644 (file)
@@ -212,7 +212,7 @@ resume:
                /* only the first xfrm gets the encap type */
                encap_type = 0;
 
-               if (async && x->repl->check(x, skb, seq)) {
+               if (async && x->repl->recheck(x, skb, seq)) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
                        goto drop_unlock;
                }
index 2f6d11d04a2b29910a1f284d3e3af8b0db1bfcce..3efb07d3eb27425c8b9b5114c925eb9e7f402c9e 100644 (file)
@@ -420,6 +420,18 @@ err:
        return -EINVAL;
 }
 
+static int xfrm_replay_recheck_esn(struct xfrm_state *x,
+                                  struct sk_buff *skb, __be32 net_seq)
+{
+       if (unlikely(XFRM_SKB_CB(skb)->seq.input.hi !=
+                    htonl(xfrm_replay_seqhi(x, net_seq)))) {
+                       x->stats.replay_window++;
+                       return -EINVAL;
+       }
+
+       return xfrm_replay_check_esn(x, skb, net_seq);
+}
+
 static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq)
 {
        unsigned int bitnr, nr, i;
@@ -479,6 +491,7 @@ static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq)
 static struct xfrm_replay xfrm_replay_legacy = {
        .advance        = xfrm_replay_advance,
        .check          = xfrm_replay_check,
+       .recheck        = xfrm_replay_check,
        .notify         = xfrm_replay_notify,
        .overflow       = xfrm_replay_overflow,
 };
@@ -486,6 +499,7 @@ static struct xfrm_replay xfrm_replay_legacy = {
 static struct xfrm_replay xfrm_replay_bmp = {
        .advance        = xfrm_replay_advance_bmp,
        .check          = xfrm_replay_check_bmp,
+       .recheck        = xfrm_replay_check_bmp,
        .notify         = xfrm_replay_notify_bmp,
        .overflow       = xfrm_replay_overflow_bmp,
 };
@@ -493,6 +507,7 @@ static struct xfrm_replay xfrm_replay_bmp = {
 static struct xfrm_replay xfrm_replay_esn = {
        .advance        = xfrm_replay_advance_esn,
        .check          = xfrm_replay_check_esn,
+       .recheck        = xfrm_replay_recheck_esn,
        .notify         = xfrm_replay_notify_bmp,
        .overflow       = xfrm_replay_overflow_esn,
 };
index 87cd0e4d42829a19797ae3e2371c47723fd40fb7..210be48d8ae3c295a3f9e9642c356acd9866c92c 100644 (file)
@@ -1994,8 +1994,10 @@ int __xfrm_init_state(struct xfrm_state *x, bool init_replay)
                goto error;
 
        x->outer_mode = xfrm_get_mode(x->props.mode, family);
-       if (x->outer_mode == NULL)
+       if (x->outer_mode == NULL) {
+               err = -EPROTONOSUPPORT;
                goto error;
+       }
 
        if (init_replay) {
                err = xfrm_init_replay(x);
index 6bf8e87f1dcf124021083b25babe9aab317b7bbf..c3f69ae275d1f7710b5f38c02e9daf6b45b947a4 100644 (file)
@@ -42,7 +42,7 @@ quiet_cmd_install = INSTALL $(subst $(srctree)/,,$@)
 $(installed-fw-dirs):
        $(call cmd,mkdir)
 
-$(installed-fw): $(INSTALL_FW_PATH)/%: $(obj)/% | $(INSTALL_FW_PATH)/$$(dir %)
+$(installed-fw): $(INSTALL_FW_PATH)/%: $(obj)/% | $$(dir $(INSTALL_FW_PATH)/%)
        $(call cmd,install)
 
 PHONY +=  __fw_install __fw_modinst FORCE
index 4629038c9e5acb677499ace431355a818ee4b3d6..b3d907eb93a91e7437ec8be8dac4248990eaf1e0 100644 (file)
@@ -74,8 +74,13 @@ kallsyms()
        info KSYM ${2}
        local kallsymopt;
 
+       if [ -n "${CONFIG_SYMBOL_PREFIX}" ]; then
+               kallsymopt="${kallsymopt} \
+                           --symbol-prefix=${CONFIG_SYMBOL_PREFIX}"
+       fi
+
        if [ -n "${CONFIG_KALLSYMS_ALL}" ]; then
-               kallsymopt=--all-symbols
+               kallsymopt="${kallsymopt} --all-symbols"
        fi
 
        local aflags="${KBUILD_AFLAGS} ${KBUILD_AFLAGS_KERNEL}               \
@@ -211,7 +216,7 @@ if [ -n "${CONFIG_KALLSYMS}" ]; then
 
        if ! cmp -s System.map .tmp_System.map; then
                echo >&2 Inconsistent kallsyms data
-               echo >&2 echo Try "make KALLSYMS_EXTRA_PASS=1" as a workaround
+               echo >&2 Try "make KALLSYMS_EXTRA_PASS=1" as a workaround
                cleanup
                exit 1
        fi
index ec2118d0e27aca3f5fef6c2ddd72f8b166ce98ca..eb60cb8dbb8a6f12d965912e923197cdd2f4e8b5 100644 (file)
@@ -80,14 +80,12 @@ static int snd_compr_open(struct inode *inode, struct file *f)
        int maj = imajor(inode);
        int ret;
 
-       if (f->f_flags & O_WRONLY)
+       if ((f->f_flags & O_ACCMODE) == O_WRONLY)
                dirn = SND_COMPRESS_PLAYBACK;
-       else if (f->f_flags & O_RDONLY)
+       else if ((f->f_flags & O_ACCMODE) == O_RDONLY)
                dirn = SND_COMPRESS_CAPTURE;
-       else {
-               pr_err("invalid direction\n");
+       else
                return -EINVAL;
-       }
 
        if (maj == snd_major)
                compr = snd_lookup_minor_data(iminor(inode),
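
The snd_compr_open() fix above works because O_RDONLY is conventionally 0 on Linux, so a plain `f_flags & O_RDONLY` test can never be true and read-only (capture) opens fell through to -EINVAL; the access mode has to be masked with O_ACCMODE and compared instead. A small userspace sketch, assuming the usual values O_RDONLY=0, O_WRONLY=1, O_RDWR=2:

    #include <fcntl.h>
    #include <stdio.h>

    static const char *direction(int flags)
    {
            switch (flags & O_ACCMODE) {
            case O_WRONLY: return "playback";
            case O_RDONLY: return "capture";
            default:       return "invalid";      /* e.g. O_RDWR */
            }
    }

    int main(void)
    {
            printf("O_RDONLY=%d O_WRONLY=%d O_RDWR=%d O_ACCMODE=%#x\n",
                   O_RDONLY, O_WRONLY, O_RDWR, O_ACCMODE);

            /* the old test: always false for a read-only open */
            printf("O_RDONLY & O_RDONLY = %d\n", O_RDONLY & O_RDONLY);

            printf("O_RDONLY -> %s\n", direction(O_RDONLY));
            printf("O_WRONLY -> %s\n", direction(O_WRONLY));
            printf("O_RDWR   -> %s\n", direction(O_RDWR));
            return 0;
    }
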
index f560051a949e943e10efb844364d5a978a587fcd..1c65cc5e3a31101d098d6cdb48a2772d1adc38f9 100644 (file)
@@ -1209,6 +1209,9 @@ static void snd_hda_codec_free(struct hda_codec *codec)
        kfree(codec);
 }
 
+static bool snd_hda_codec_get_supported_ps(struct hda_codec *codec,
+                               hda_nid_t fg, unsigned int power_state);
+
 static void hda_set_power_state(struct hda_codec *codec, hda_nid_t fg,
                                unsigned int power_state);
 
@@ -1317,6 +1320,10 @@ int /*__devinit*/ snd_hda_codec_new(struct hda_bus *bus,
                                           AC_VERB_GET_SUBSYSTEM_ID, 0);
        }
 
+       codec->epss = snd_hda_codec_get_supported_ps(codec,
+                                       codec->afg ? codec->afg : codec->mfg,
+                                       AC_PWRST_EPSS);
+
        /* power-up all before initialization */
        hda_set_power_state(codec,
                            codec->afg ? codec->afg : codec->mfg,
@@ -2346,6 +2353,7 @@ int snd_hda_codec_reset(struct hda_codec *codec)
        }
        if (codec->patch_ops.free)
                codec->patch_ops.free(codec);
+       memset(&codec->patch_ops, 0, sizeof(codec->patch_ops));
        snd_hda_jack_tbl_clear(codec);
        codec->proc_widget_hook = NULL;
        codec->spec = NULL;
@@ -2361,7 +2369,6 @@ int snd_hda_codec_reset(struct hda_codec *codec)
        codec->num_pcms = 0;
        codec->pcm_info = NULL;
        codec->preset = NULL;
-       memset(&codec->patch_ops, 0, sizeof(codec->patch_ops));
        codec->slave_dig_outs = NULL;
        codec->spdif_status_reset = 0;
        module_put(codec->owner);
@@ -3543,8 +3550,7 @@ static void hda_set_power_state(struct hda_codec *codec, hda_nid_t fg,
        /* this delay seems necessary to avoid click noise at power-down */
        if (power_state == AC_PWRST_D3) {
                /* transition time less than 10ms for power down */
-               bool epss = snd_hda_codec_get_supported_ps(codec, fg, AC_PWRST_EPSS);
-               msleep(epss ? 10 : 100);
+               msleep(codec->epss ? 10 : 100);
        }
 
        /* repeat power states setting at most 10 times*/
index 7fbc1bcaf1a9593b6e0ec71b8b5ac2bb5262e336..e5a7e19a80712c0ece3ad1b41617e40512206ad3 100644 (file)
@@ -862,6 +862,7 @@ struct hda_codec {
        unsigned int ignore_misc_bit:1; /* ignore MISC_NO_PRESENCE bit */
        unsigned int no_jack_detect:1;  /* Machine has no jack-detection */
        unsigned int pcm_format_first:1; /* PCM format must be set first */
+       unsigned int epss:1;            /* supporting EPSS? */
 #ifdef CONFIG_SND_HDA_POWER_SAVE
        unsigned int power_on :1;       /* current (global) power-state */
        int power_transition;   /* power-state in transition */
index 60882c62f18006a3b2339d354b58550fdcd30718..c4763c52eaf64ced9e1d520f80fd8b245507dbd6 100644 (file)
@@ -2701,6 +2701,8 @@ static struct snd_pci_quirk position_fix_list[] __devinitdata = {
        SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB),
        SND_PCI_QUIRK(0x1043, 0x81b3, "ASUS", POS_FIX_LPIB),
        SND_PCI_QUIRK(0x1043, 0x81e7, "ASUS M2V", POS_FIX_LPIB),
+       SND_PCI_QUIRK(0x1043, 0x1ac3, "ASUS X53S", POS_FIX_POSBUF),
+       SND_PCI_QUIRK(0x1043, 0x1b43, "ASUS K53E", POS_FIX_POSBUF),
        SND_PCI_QUIRK(0x104d, 0x9069, "Sony VPCS11V9E", POS_FIX_LPIB),
        SND_PCI_QUIRK(0x10de, 0xcb89, "Macbook Pro 7,1", POS_FIX_LPIB),
        SND_PCI_QUIRK(0x1297, 0x3166, "Shuttle", POS_FIX_LPIB),
index ea5775a1a7db292e25dd7956b8ac502d1a9db33e..3d4722f0a1cacba8fc9a923f22dbf767433dbd6c 100644 (file)
@@ -1075,7 +1075,7 @@ static struct snd_kcontrol_new stac_smux_mixer = {
 
 static const char * const slave_pfxs[] = {
        "Front", "Surround", "Center", "LFE", "Side",
-       "Headphone", "Speaker", "IEC958",
+       "Headphone", "Speaker", "IEC958", "PCM",
        NULL
 };
 
@@ -4543,6 +4543,9 @@ static void stac92xx_line_out_detect(struct hda_codec *codec,
        struct auto_pin_cfg *cfg = &spec->autocfg;
        int i;
 
+       if (cfg->speaker_outs == 0)
+               return;
+
        for (i = 0; i < cfg->line_outs; i++) {
                if (presence)
                        break;
@@ -5531,6 +5534,7 @@ static int patch_stac92hd83xxx(struct hda_codec *codec)
                snd_hda_codec_set_pincfg(codec, 0xf, 0x2181205e);
        }
 
+       codec->epss = 0; /* longer delay needed for D3 */
        codec->no_trigger_sense = 1;
        codec->spec = spec;
 
index 764cc93dbca402f6372b6f73a0854470f3046f73..075d5aa1fee003bef0dfaa4177ac21d010247277 100644 (file)
@@ -297,6 +297,7 @@ static int ak4396_dac_vol_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem
 }
 
 static const DECLARE_TLV_DB_SCALE(db_scale_wm_dac, -12700, 100, 1);
+static const DECLARE_TLV_DB_LINEAR(ak4396_db_scale, TLV_DB_GAIN_MUTE, 0);
 
 static struct snd_kcontrol_new prodigy_hd2_controls[] __devinitdata = {
     {
@@ -307,7 +308,7 @@ static struct snd_kcontrol_new prodigy_hd2_controls[] __devinitdata = {
        .info = ak4396_dac_vol_info,
        .get = ak4396_dac_vol_get,
        .put = ak4396_dac_vol_put,
-       .tlv = { .p = db_scale_wm_dac },
+       .tlv = { .p = ak4396_db_scale },
     },
 };
 
index 5c9cacaf2d525cddabffd0b416c0695dfadaab99..1cf7a32d1b211e779cb7705746c32b07c11afa82 100644 (file)
@@ -426,7 +426,7 @@ static const int arizona_44k1_bclk_rates[] = {
        940800,
        1411200,
        1881600,
-       2882400,
+       2822400,
        3763200,
        5644800,
        7526400,
index 8f726c063f42badc9c598de4af670fb78f8937e1..115a403018105b0eaa2fb64abee56a47b5d32cc4 100644 (file)
@@ -659,7 +659,7 @@ static struct snd_soc_dai_driver mc13783_dai_async[] = {
                .id = MC13783_ID_STEREO_DAC,
                .playback = {
                        .stream_name = "Playback",
-                       .channels_min = 1,
+                       .channels_min = 2,
                        .channels_max = 2,
                        .rates = SNDRV_PCM_RATE_8000_96000,
                        .formats = MC13783_FORMATS,
@@ -670,7 +670,7 @@ static struct snd_soc_dai_driver mc13783_dai_async[] = {
                .id = MC13783_ID_STEREO_CODEC,
                .capture = {
                        .stream_name = "Capture",
-                       .channels_min = 1,
+                       .channels_min = 2,
                        .channels_max = 2,
                        .rates = MC13783_RATES_RECORD,
                        .formats = MC13783_FORMATS,
@@ -692,14 +692,14 @@ static struct snd_soc_dai_driver mc13783_dai_sync[] = {
                .id = MC13783_ID_SYNC,
                .playback = {
                        .stream_name = "Playback",
-                       .channels_min = 1,
+                       .channels_min = 2,
                        .channels_max = 2,
                        .rates = SNDRV_PCM_RATE_8000_96000,
                        .formats = MC13783_FORMATS,
                },
                .capture = {
                        .stream_name = "Capture",
-                       .channels_min = 1,
+                       .channels_min = 2,
                        .channels_max = 2,
                        .rates = MC13783_RATES_RECORD,
                        .formats = MC13783_FORMATS,
index 0013afe48e66a83171b93691b46593cb2e64d7ea..dc4262eea4b711990c71e602fe1a0fba7928250b 100644 (file)
@@ -100,7 +100,7 @@ static const struct reg_default wm8904_reg_defaults[] = {
        { 14,  0x0000 },     /* R14  - Power Management 2 */
        { 15,  0x0000 },     /* R15  - Power Management 3 */
        { 18,  0x0000 },     /* R18  - Power Management 6 */
-       { 19,  0x945E },     /* R20  - Clock Rates 0 */
+       { 20,  0x945E },     /* R20  - Clock Rates 0 */
        { 21,  0x0C05 },     /* R21  - Clock Rates 1 */
        { 22,  0x0006 },     /* R22  - Clock Rates 2 */
        { 24,  0x0050 },     /* R24  - Audio Interface 0 */
index fb21b17f17f54ae342b552e05b42df6a0afcaeca..199408ec42612dfe57f63e3933457e901cf7bcfa 100644 (file)
@@ -94,7 +94,7 @@ static int __devinit imx_sgtl5000_probe(struct platform_device *pdev)
                dev_err(&pdev->dev, "audmux internal port setup failed\n");
                return ret;
        }
-       imx_audmux_v2_configure_port(ext_port,
+       ret = imx_audmux_v2_configure_port(ext_port,
                        IMX_AUDMUX_V2_PTCR_SYN,
                        IMX_AUDMUX_V2_PDCR_RXDSEL(int_port));
        if (ret) {
index 009533ab8d1894054ed07c05ae140362d6738582..df65f98211ec2bdc3c664f7a87443f8477dc5476 100644 (file)
@@ -59,7 +59,7 @@ static int am3517evm_hw_params(struct snd_pcm_substream *substream,
                return ret;
        }
 
-       snd_soc_dai_set_sysclk(cpu_dai, OMAP_MCBSP_FSR_SRC_FSX, 0,
+       ret = snd_soc_dai_set_sysclk(cpu_dai, OMAP_MCBSP_FSR_SRC_FSX, 0,
                                SND_SOC_CLOCK_IN);
        if (ret < 0) {
                printk(KERN_ERR "can't set CPU system clock OMAP_MCBSP_FSR_SRC_FSX\n");
index f3ebc38c10fe7633ea5ec260562b64bfdb7e402f..b70964ea448cef264bf540597850558e7c9fd23c 100644 (file)
@@ -34,9 +34,7 @@ static const struct snd_pcm_hardware dma_hardware = {
        .info                   = SNDRV_PCM_INFO_INTERLEAVED |
                                    SNDRV_PCM_INFO_BLOCK_TRANSFER |
                                    SNDRV_PCM_INFO_MMAP |
-                                   SNDRV_PCM_INFO_MMAP_VALID |
-                                   SNDRV_PCM_INFO_PAUSE |
-                                   SNDRV_PCM_INFO_RESUME,
+                                   SNDRV_PCM_INFO_MMAP_VALID,
        .formats                = SNDRV_PCM_FMTBIT_S16_LE |
                                    SNDRV_PCM_FMTBIT_U16_LE |
                                    SNDRV_PCM_FMTBIT_U8 |
@@ -248,15 +246,11 @@ static int dma_trigger(struct snd_pcm_substream *substream, int cmd)
 
        switch (cmd) {
        case SNDRV_PCM_TRIGGER_START:
-       case SNDRV_PCM_TRIGGER_RESUME:
-       case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
                prtd->state |= ST_RUNNING;
                prtd->params->ops->trigger(prtd->params->ch);
                break;
 
        case SNDRV_PCM_TRIGGER_STOP:
-       case SNDRV_PCM_TRIGGER_SUSPEND:
-       case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
                prtd->state &= ~ST_RUNNING;
                prtd->params->ops->stop(prtd->params->ch);
                break;
index dd7c49fafd754f949014f88b9a2ad0b80d59cc20..f90139b5f50d74089dc98fd0d19de42cb3732a84 100644 (file)
@@ -291,8 +291,11 @@ static int snd_soc_dapm_set_bias_level(struct snd_soc_dapm_context *dapm,
                if (dapm->codec->driver->set_bias_level)
                        ret = dapm->codec->driver->set_bias_level(dapm->codec,
                                                                  level);
-       } else
+               else
+                       dapm->bias_level = level;
+       } else if (!card || dapm != &card->dapm) {
                dapm->bias_level = level;
+       }
 
        if (ret != 0)
                goto out;
index 97c2cac8e92c726712746d5db4658d7f0898e441..8c7f23729446b1582f6d7625d5933aad905e45d7 100644 (file)
@@ -138,7 +138,7 @@ static void spear_pcm_free(struct snd_pcm *pcm)
                        continue;
 
                buf = &substream->dma_buffer;
-               if (!buf && !buf->area)
+               if (!buf || !buf->area)
                        continue;
 
                dma_free_writecombine(pcm->card->dev, buf->bytes,
index e463529b38bbfbfd35cc745bf430f68399bfee91..76cb1b363b71c2ce2d1be75c27113c3cf2154127 100644 (file)
@@ -89,7 +89,6 @@ static struct snd_soc_jack_gpio tegra_alc5632_hp_jack_gpio = {
        .name = "Headset detection",
        .report = SND_JACK_HEADSET,
        .debounce_time = 150,
-       .invert = 1,
 };
 
 static const struct snd_soc_dapm_widget tegra_alc5632_dapm_widgets[] = {
index 5658bcec1931ce5a76a75a32b020730f41da8c13..8d6900c1ee47e8d1a263584bf470e5c2eec56af4 100644 (file)
@@ -334,11 +334,11 @@ static int tegra_pcm_hw_params(struct snd_pcm_substream *substream,
        if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
                slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
                slave_config.dst_addr = dmap->addr;
-               slave_config.src_maxburst = 0;
+               slave_config.dst_maxburst = 4;
        } else {
                slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
                slave_config.src_addr = dmap->addr;
-               slave_config.dst_maxburst = 0;
+               slave_config.src_maxburst = 4;
        }
        slave_config.slave_id = dmap->req_sel;
 
index 5c472f335a64d6e5c11a5ee82e755153312ee40b..eb85113d472a22503aceb1939d5191d9d2c856de 100644 (file)
@@ -663,7 +663,6 @@ int ux500_msp_i2s_init_msp(struct platform_device *pdev,
                        struct ux500_msp **msp_p,
                        struct msp_i2s_platform_data *platform_data)
 {
-       int ret = 0;
        struct resource *res = NULL;
        struct i2s_controller *i2s_cont;
        struct ux500_msp *msp;
@@ -685,15 +684,14 @@ int ux500_msp_i2s_init_msp(struct platform_device *pdev,
        if (res == NULL) {
                dev_err(&pdev->dev, "%s: ERROR: Unable to get resource!\n",
                        __func__);
-               ret = -ENOMEM;
-               goto err_res;
+               return -ENOMEM;
        }
 
-       msp->registers = ioremap(res->start, (res->end - res->start + 1));
+       msp->registers = devm_ioremap(&pdev->dev, res->start,
+                                     resource_size(res));
        if (msp->registers == NULL) {
                dev_err(&pdev->dev, "%s: ERROR: ioremap failed!\n", __func__);
-               ret = -ENOMEM;
-               goto err_res;
+               return -ENOMEM;
        }
 
        msp->msp_state = MSP_STATE_IDLE;
@@ -705,7 +703,7 @@ int ux500_msp_i2s_init_msp(struct platform_device *pdev,
                dev_err(&pdev->dev,
                        "%s: ERROR: Failed to allocate I2S-controller!\n",
                        __func__);
-               goto err_i2s_cont;
+               return -ENOMEM;
        }
        i2s_cont->dev.parent = &pdev->dev;
        i2s_cont->data = (void *)msp;
@@ -716,14 +714,6 @@ int ux500_msp_i2s_init_msp(struct platform_device *pdev,
        msp->i2s_cont = i2s_cont;
 
        return 0;
-
-err_i2s_cont:
-       iounmap(msp->registers);
-
-err_res:
-       devm_kfree(&pdev->dev, msp);
-
-       return ret;
 }
 
 void ux500_msp_i2s_cleanup_msp(struct platform_device *pdev,
@@ -732,11 +722,6 @@ void ux500_msp_i2s_cleanup_msp(struct platform_device *pdev,
        dev_dbg(msp->dev, "%s: Enter (id = %d).\n", __func__, msp->id);
 
        device_unregister(&msp->i2s_cont->dev);
-       devm_kfree(&pdev->dev, msp->i2s_cont);
-
-       iounmap(msp->registers);
-
-       devm_kfree(&pdev->dev, msp);
 }
 
 MODULE_LICENSE("GPL v2");
index d5b5c3388e28ced3cb7c287399755b56d90e1aed..4a469f0cb6d4750b03fab852d13515022fe0b4af 100644 (file)
@@ -553,7 +553,7 @@ static void snd_usb_audio_disconnect(struct usb_device *dev,
                                     struct snd_usb_audio *chip)
 {
        struct snd_card *card;
-       struct list_head *p;
+       struct list_head *p, *n;
 
        if (chip == (void *)-1L)
                return;
@@ -570,7 +570,7 @@ static void snd_usb_audio_disconnect(struct usb_device *dev,
                        snd_usb_stream_disconnect(p);
                }
                /* release the endpoint resources */
-               list_for_each(p, &chip->ep_list) {
+               list_for_each_safe(p, n, &chip->ep_list) {
                        snd_usb_endpoint_free(p);
                }
                /* release the midi resources */
index c411812026884408afd74f1e580f2e87bbe747fc..d6e2bb49c59c52f01288925fa52461eea510e78a 100644 (file)
@@ -141,7 +141,7 @@ int snd_usb_endpoint_implict_feedback_sink(struct snd_usb_endpoint *ep)
  *
  * For implicit feedback, next_packet_size() is unused.
  */
-static int next_packet_size(struct snd_usb_endpoint *ep)
+int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep)
 {
        unsigned long flags;
        int ret;
@@ -177,15 +177,6 @@ static void retire_inbound_urb(struct snd_usb_endpoint *ep,
                ep->retire_data_urb(ep->data_subs, urb);
 }
 
-static void prepare_outbound_urb_sizes(struct snd_usb_endpoint *ep,
-                                      struct snd_urb_ctx *ctx)
-{
-       int i;
-
-       for (i = 0; i < ctx->packets; ++i)
-               ctx->packet_size[i] = next_packet_size(ep);
-}
-
 /*
  * Prepare a PLAYBACK urb for submission to the bus.
  */
@@ -370,7 +361,6 @@ static void snd_complete_urb(struct urb *urb)
                        goto exit_clear;
                }
 
-               prepare_outbound_urb_sizes(ep, ctx);
                prepare_outbound_urb(ep, ctx);
        } else {
                retire_inbound_urb(ep, ctx);
@@ -799,7 +789,9 @@ int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep,
 /**
  * snd_usb_endpoint_start: start an snd_usb_endpoint
  *
- * @ep: the endpoint to start
+ * @ep:                the endpoint to start
+ * @can_sleep: flag indicating whether the operation is executed in
+ *             non-atomic context
  *
  * A call to this function will increment the use count of the endpoint.
  * In case it is not already running, the URBs for this endpoint will be
@@ -809,7 +801,7 @@ int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep,
  *
  * Returns an error if the URB submission failed, 0 in all other cases.
  */
-int snd_usb_endpoint_start(struct snd_usb_endpoint *ep)
+int snd_usb_endpoint_start(struct snd_usb_endpoint *ep, int can_sleep)
 {
        int err;
        unsigned int i;
@@ -821,6 +813,11 @@ int snd_usb_endpoint_start(struct snd_usb_endpoint *ep)
        if (++ep->use_count != 1)
                return 0;
 
+       /* just to be sure */
+       deactivate_urbs(ep, 0, can_sleep);
+       if (can_sleep)
+               wait_clear_urbs(ep);
+
        ep->active_mask = 0;
        ep->unlink_mask = 0;
        ep->phase = 0;
@@ -850,7 +847,6 @@ int snd_usb_endpoint_start(struct snd_usb_endpoint *ep)
                        goto __error;
 
                if (usb_pipeout(ep->pipe)) {
-                       prepare_outbound_urb_sizes(ep, urb->context);
                        prepare_outbound_urb(ep, urb->context);
                } else {
                        prepare_inbound_urb(ep, urb->context);
index ee2723fb174f61ddb9a3c0c0224bc065b36ba173..cbbbdf226d66b6fa9eec51fe829a1b03cdd8d518 100644 (file)
@@ -13,7 +13,7 @@ int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep,
                                struct audioformat *fmt,
                                struct snd_usb_endpoint *sync_ep);
 
-int  snd_usb_endpoint_start(struct snd_usb_endpoint *ep);
+int  snd_usb_endpoint_start(struct snd_usb_endpoint *ep, int can_sleep);
 void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep,
                           int force, int can_sleep, int wait);
 int  snd_usb_endpoint_activate(struct snd_usb_endpoint *ep);
@@ -21,6 +21,7 @@ int  snd_usb_endpoint_deactivate(struct snd_usb_endpoint *ep);
 void snd_usb_endpoint_free(struct list_head *head);
 
 int snd_usb_endpoint_implict_feedback_sink(struct snd_usb_endpoint *ep);
+int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep);
 
 void snd_usb_handle_sync_urb(struct snd_usb_endpoint *ep,
                             struct snd_usb_endpoint *sender,
index 62ec808ed792503e789ea26eb74837998623915e..f782ce19bf5aa14be92a43df553d66b177cb9429 100644 (file)
@@ -212,7 +212,7 @@ int snd_usb_init_pitch(struct snd_usb_audio *chip, int iface,
        }
 }
 
-static int start_endpoints(struct snd_usb_substream *subs)
+static int start_endpoints(struct snd_usb_substream *subs, int can_sleep)
 {
        int err;
 
@@ -225,7 +225,7 @@ static int start_endpoints(struct snd_usb_substream *subs)
                snd_printdd(KERN_DEBUG "Starting data EP @%p\n", ep);
 
                ep->data_subs = subs;
-               err = snd_usb_endpoint_start(ep);
+               err = snd_usb_endpoint_start(ep, can_sleep);
                if (err < 0) {
                        clear_bit(SUBSTREAM_FLAG_DATA_EP_STARTED, &subs->flags);
                        return err;
@@ -236,10 +236,25 @@ static int start_endpoints(struct snd_usb_substream *subs)
            !test_and_set_bit(SUBSTREAM_FLAG_SYNC_EP_STARTED, &subs->flags)) {
                struct snd_usb_endpoint *ep = subs->sync_endpoint;
 
+               if (subs->data_endpoint->iface != subs->sync_endpoint->iface ||
+                   subs->data_endpoint->alt_idx != subs->sync_endpoint->alt_idx) {
+                       err = usb_set_interface(subs->dev,
+                                               subs->sync_endpoint->iface,
+                                               subs->sync_endpoint->alt_idx);
+                       if (err < 0) {
+                               snd_printk(KERN_ERR
+                                          "%d:%d:%d: cannot set interface (%d)\n",
+                                          subs->dev->devnum,
+                                          subs->sync_endpoint->iface,
+                                          subs->sync_endpoint->alt_idx, err);
+                               return -EIO;
+                       }
+               }
+
                snd_printdd(KERN_DEBUG "Starting sync EP @%p\n", ep);
 
                ep->sync_slave = subs->data_endpoint;
-               err = snd_usb_endpoint_start(ep);
+               err = snd_usb_endpoint_start(ep, can_sleep);
                if (err < 0) {
                        clear_bit(SUBSTREAM_FLAG_SYNC_EP_STARTED, &subs->flags);
                        return err;
@@ -544,13 +559,10 @@ static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream)
        subs->last_frame_number = 0;
        runtime->delay = 0;
 
-       /* clear the pending deactivation on the target EPs */
-       deactivate_endpoints(subs);
-
        /* for playback, submit the URBs now; otherwise, the first hwptr_done
         * updates for all URBs would happen at the same time when starting */
        if (subs->direction == SNDRV_PCM_STREAM_PLAYBACK)
-               return start_endpoints(subs);
+               return start_endpoints(subs, 1);
 
        return 0;
 }
@@ -1032,6 +1044,7 @@ static void prepare_playback_urb(struct snd_usb_substream *subs,
                                 struct urb *urb)
 {
        struct snd_pcm_runtime *runtime = subs->pcm_substream->runtime;
+       struct snd_usb_endpoint *ep = subs->data_endpoint;
        struct snd_urb_ctx *ctx = urb->context;
        unsigned int counts, frames, bytes;
        int i, stride, period_elapsed = 0;
@@ -1043,7 +1056,11 @@ static void prepare_playback_urb(struct snd_usb_substream *subs,
        urb->number_of_packets = 0;
        spin_lock_irqsave(&subs->lock, flags);
        for (i = 0; i < ctx->packets; i++) {
-               counts = ctx->packet_size[i];
+               if (ctx->packet_size[i])
+                       counts = ctx->packet_size[i];
+               else
+                       counts = snd_usb_endpoint_next_packet_size(ep);
+
                /* set up descriptor */
                urb->iso_frame_desc[i].offset = frames * stride;
                urb->iso_frame_desc[i].length = counts * stride;
@@ -1094,7 +1111,16 @@ static void prepare_playback_urb(struct snd_usb_substream *subs,
        subs->hwptr_done += bytes;
        if (subs->hwptr_done >= runtime->buffer_size * stride)
                subs->hwptr_done -= runtime->buffer_size * stride;
+
+       /* update delay with exact number of samples queued */
+       runtime->delay = subs->last_delay;
        runtime->delay += frames;
+       subs->last_delay = runtime->delay;
+
+       /* realign last_frame_number */
+       subs->last_frame_number = usb_get_current_frame_number(subs->dev);
+       subs->last_frame_number &= 0xFF; /* keep 8 LSBs */
+
        spin_unlock_irqrestore(&subs->lock, flags);
        urb->transfer_buffer_length = bytes;
        if (period_elapsed)
@@ -1112,12 +1138,32 @@ static void retire_playback_urb(struct snd_usb_substream *subs,
        struct snd_pcm_runtime *runtime = subs->pcm_substream->runtime;
        int stride = runtime->frame_bits >> 3;
        int processed = urb->transfer_buffer_length / stride;
+       int est_delay;
+
+       /* ignore the delay accounting when processed=0 is given, i.e.
+        * silent payloads are processed before handling the actual data
+        */
+       if (!processed)
+               return;
 
        spin_lock_irqsave(&subs->lock, flags);
-       if (processed > runtime->delay)
-               runtime->delay = 0;
+       est_delay = snd_usb_pcm_delay(subs, runtime->rate);
+       /* update delay with exact number of samples played */
+       if (processed > subs->last_delay)
+               subs->last_delay = 0;
        else
-               runtime->delay -= processed;
+               subs->last_delay -= processed;
+       runtime->delay = subs->last_delay;
+
+       /*
+        * Report when delay estimate is off by more than 2ms.
+        * The error should be lower than 2ms since the estimate relies
+        * on two reads of a counter updated every ms.
+        */
+       if (abs(est_delay - subs->last_delay) * 1000 > runtime->rate * 2)
+               snd_printk(KERN_DEBUG "delay: estimated %d, actual %d\n",
+                       est_delay, subs->last_delay);
+
        spin_unlock_irqrestore(&subs->lock, flags);
 }
 
@@ -1175,7 +1221,7 @@ static int snd_usb_substream_capture_trigger(struct snd_pcm_substream *substream
 
        switch (cmd) {
        case SNDRV_PCM_TRIGGER_START:
-               err = start_endpoints(subs);
+               err = start_endpoints(subs, 0);
                if (err < 0)
                        return err;